refactored code update
This commit is contained in:
commit
39d0c8f0db
5
.editorconfig
Normal file
5
.editorconfig
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
[*]
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 2
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
insert_final_newline = true
|
15
.gitignore
vendored
Normal file
15
.gitignore
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
build/
|
||||||
|
node_modules/
|
||||||
|
deps/librdkafka
|
||||||
|
npm-debug.log
|
||||||
|
|
||||||
|
docs
|
||||||
|
|
||||||
|
deps/*
|
||||||
|
!deps/*.gyp
|
||||||
|
!deps/windows-install.*
|
||||||
|
|
||||||
|
.DS_Store
|
||||||
|
|
||||||
|
package-lock.json
|
||||||
|
.vscode
|
3
.gitmodules
vendored
Normal file
3
.gitmodules
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
[submodule "deps/librdkafka"]
|
||||||
|
path = deps/librdkafka
|
||||||
|
url = https://github.com/edenhill/librdkafka.git
|
1
.jshintignore
Normal file
1
.jshintignore
Normal file
@ -0,0 +1 @@
|
|||||||
|
README.md
|
23
.jshintrc
Normal file
23
.jshintrc
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
{
|
||||||
|
"node": true,
|
||||||
|
"mocha": true,
|
||||||
|
"browser": false,
|
||||||
|
"boss": true,
|
||||||
|
"curly": true,
|
||||||
|
"debug": false,
|
||||||
|
"devel": false,
|
||||||
|
"eqeqeq": true,
|
||||||
|
"evil": true,
|
||||||
|
"forin": false,
|
||||||
|
"latedef": false,
|
||||||
|
"noarg": true,
|
||||||
|
"nonew": true,
|
||||||
|
"nomen": false,
|
||||||
|
"onevar": false,
|
||||||
|
"plusplus": false,
|
||||||
|
"regexp": false,
|
||||||
|
"undef": true,
|
||||||
|
"strict": false,
|
||||||
|
"white": false,
|
||||||
|
"eqnull": true
|
||||||
|
}
|
10
.npmignore
Normal file
10
.npmignore
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
deps/*
|
||||||
|
!deps/librdkafka.gyp
|
||||||
|
!deps/librdkafka
|
||||||
|
!deps/windows-install.*
|
||||||
|
.gitmodules
|
||||||
|
Dockerfile
|
||||||
|
deps/librdkafka/config.h
|
||||||
|
build
|
||||||
|
.github
|
||||||
|
.vscode
|
67
.travis.yml
Normal file
67
.travis.yml
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
language: node_js
|
||||||
|
|
||||||
|
cache:
|
||||||
|
directories:
|
||||||
|
- node_modules
|
||||||
|
node_js:
|
||||||
|
- "4"
|
||||||
|
- "6"
|
||||||
|
- "8"
|
||||||
|
- "10"
|
||||||
|
- "12"
|
||||||
|
- "13"
|
||||||
|
- "14"
|
||||||
|
- "15"
|
||||||
|
- "16"
|
||||||
|
sudo: required
|
||||||
|
services: docker
|
||||||
|
before_install:
|
||||||
|
- if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_BUILD_STAGE_NAME" =~ Test.* ]]; then ./run_docker.sh; fi
|
||||||
|
# - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install openssl; fi
|
||||||
|
- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then ./win_install.bat; fi
|
||||||
|
install:
|
||||||
|
- if [[ "$TRAVIS_BUILD_STAGE_NAME" =~ Test.* ]]; then npm install; fi
|
||||||
|
|
||||||
|
script:
|
||||||
|
- if [[ "$TRAVIS_BUILD_STAGE_NAME" =~ Test.* ]]; then make lint && make test && make check; else echo $TRAVIS_BUILD_STAGE_NAME; fi
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
include:
|
||||||
|
- stage: test_on_mac
|
||||||
|
os: osx
|
||||||
|
osx_image: xcode10
|
||||||
|
env: CPPFLAGS=-I/usr/local/opt/openssl/include LDFLAGS=-L/usr/local/opt/openssl/lib
|
||||||
|
node_js:
|
||||||
|
- "10"
|
||||||
|
- stage: test_on_win
|
||||||
|
os: windows
|
||||||
|
node_js:
|
||||||
|
- "10"
|
||||||
|
# - stage: create_doc
|
||||||
|
# provider: script
|
||||||
|
# before_deploy:
|
||||||
|
# - openssl aes-256-cbc -K $encrypted_a2e08d5c220e_key -iv $encrypted_a2e08d5c220e_iv -in deploy.enc -out /tmp/deploy -d
|
||||||
|
# - eval "$(ssh-agent -s)"
|
||||||
|
# - chmod 600 /tmp/deploy
|
||||||
|
# - ssh-add /tmp/deploy
|
||||||
|
# script:
|
||||||
|
# - "./make_docs.sh"
|
||||||
|
# if: (tag =~ ^v)
|
||||||
|
- stage: deploy
|
||||||
|
os: linux
|
||||||
|
node_js:
|
||||||
|
- "10"
|
||||||
|
script: skip
|
||||||
|
if: (tag =~ ^v)
|
||||||
|
before_deploy:
|
||||||
|
- npm install --no-save semver
|
||||||
|
deploy:
|
||||||
|
provider: npm
|
||||||
|
skip_cleanup: true
|
||||||
|
email: webmakersteve@gmail.com
|
||||||
|
api_key:
|
||||||
|
secure: "GUI9X1TnemXIMj5nZDqjrB3zBdMVxhAVKz18BN8TzsQBBne7BOrZ7L9yM3nOwxydm53NAPHFnEjYhYhvM+qhzLnik+XFQ7O3i9rS0hAvzQOBXfup5daib5A8VPMEdTRCXPS3hZ+p5n3ZBruSCQnyTu+HL1SGH//L+j52T5hiOZ3HauhCx9Q0myTBhB6CU5L8yscUUMg12qL7Uw4jsJLfgBRq6hAxphKXfvKt+NJMOWMSmQMbC1FxgfEgIkjFUtasWcUqCmfqx+983XdhhGhC64CYkusSZynNxnsTAosZGJiIZTPYXXL+imBgsEpsMCnW/id/qwaDzRueh2vWlBq2Lk9XSU3VOlKf8nMCJafc3CVjdOZvekyk+WU23gFd4Tpmwk0OtOOM2CKMoNxeMfNvA7ovQ96PP+LDdnRvdoFZV/oX3v3jaXR6DXFd8jnqRTpK4qj7qFO9eWgy1vXdfpwwS2gGVkFvSlWa3niBWzfLSL49Lm7UBwPKJYq0V5taO2dXz8nniTBAQJDcIEKaJkG6IYw5qnLpDLHB3jIw4NwYWw6f/cB5KZXCSeoGBOb9/61XD6Uq0QoQLIhj/vTKmOjNtZHlBzHUoKGYrP+SQRk9BgYh/Cr0azVhSpm2Zjz1fTJ1kYWKzHU8JPyWf6/isKQM4FLhFvWihy/kxkVTkZGR0b4="
|
||||||
|
on:
|
||||||
|
tags: true
|
||||||
|
after_deploy:
|
||||||
|
- cat package.json
|
192
CONTRIBUTING.md
Normal file
192
CONTRIBUTING.md
Normal file
@ -0,0 +1,192 @@
|
|||||||
|
# Contributing to `node-rdkafka`
|
||||||
|
|
||||||
|
:+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
|
||||||
|
|
||||||
|
The following is a set of guidelines for contributing to `node-rdkafka`
|
||||||
|
which is hosted in the [Blizzard Organization](https://github.com/blizzard)
|
||||||
|
on GitHub. This document lists rules, guidelines, and help getting started,
|
||||||
|
so if you feel something is missing feel free to send a pull request.
|
||||||
|
|
||||||
|
#### Table Of Contents
|
||||||
|
|
||||||
|
[What should I know before I get started?](#what-should-i-know-before-i-get-started)
|
||||||
|
* [Contributor Agreement](#contributor-agreement)
|
||||||
|
|
||||||
|
[How Can I Contribute?](#how-can-i-contribute)
|
||||||
|
* [Reporting Bugs](#reporting-bugs)
|
||||||
|
* [Suggesting Enhancements](#suggesting-enhancements)
|
||||||
|
* [Pull Requests](#pull-requests)
|
||||||
|
|
||||||
|
[Styleguides](#styleguides)
|
||||||
|
* [Git Commit Messages](#git-commit-messages)
|
||||||
|
* [JavaScript Styleguide](#javascript-styleguide)
|
||||||
|
* [C++ Styleguide](#c++-styleguide)
|
||||||
|
* [Specs Styleguide](#specs-styleguide)
|
||||||
|
* [Documentation Styleguide](#documentation-styleguide)
|
||||||
|
|
||||||
|
[Debugging](#debugging)
|
||||||
|
* [Debugging C++](#debugging-c)
|
||||||
|
|
||||||
|
## What should I know before I get started?
|
||||||
|
|
||||||
|
### Contributor Agreement
|
||||||
|
|
||||||
|
Not currently required.
|
||||||
|
|
||||||
|
## How can I contribute?
|
||||||
|
|
||||||
|
### Reporting Bugs
|
||||||
|
|
||||||
|
Please use __Github Issues__ to report bugs. When filling out an issue report,
|
||||||
|
make sure to copy any related code and stack traces so we can properly debug.
|
||||||
|
We need to be able to reproduce a failing test to be able to fix your issue
|
||||||
|
most of the time, so a custom written failing test is very helpful.
|
||||||
|
|
||||||
|
Please also note the Kafka broker version that you are using and how many
|
||||||
|
replicas, partitions, and brokers you are connecting to, because some issues
|
||||||
|
might be related to Kafka. A list of `librdkafka` configuration key-value pairs
|
||||||
|
also helps.
|
||||||
|
|
||||||
|
### Suggesting Enhancements
|
||||||
|
|
||||||
|
Please use __Github Issues__ to suggest enhancements. We are happy to consider
|
||||||
|
any extra functionality or features to the library, as long as they add real
|
||||||
|
and related value to users. Describing your use case and why such an addition
|
||||||
|
helps the user base can help guide the decision to implement it into the
|
||||||
|
library's core.
|
||||||
|
|
||||||
|
### Pull Requests
|
||||||
|
|
||||||
|
* Include new test cases (either end-to-end or unit tests) with your change.
|
||||||
|
* Follow our style guides.
|
||||||
|
* Make sure all tests are still passing and the `linter` does not report any issues.
|
||||||
|
* End files with a new line.
|
||||||
|
* Document the new code in the comments (if it is JavaScript) so the
|
||||||
|
documentation generator can update the reference documentation.
|
||||||
|
* Avoid platform-dependent code.
|
||||||
|
<br>**Note:** If making modifications to the underlying C++, please use built-in
|
||||||
|
precompiler directives to detect such platform specificities. Use `Nan`
|
||||||
|
whenever possible to abstract node/v8 version incompatibility.
|
||||||
|
* Make sure your branch is up to date and rebased.
|
||||||
|
* Squash extraneous commits unless their history truly adds value to the library.
|
||||||
|
|
||||||
|
## Styleguides
|
||||||
|
|
||||||
|
### General style guidelines
|
||||||
|
|
||||||
|
Download the [EditorConfig](http://editorconfig.org) plugin for your preferred
|
||||||
|
text editor to automate the application of the following guidelines:
|
||||||
|
|
||||||
|
* Use 2-space indent (no tabs).
|
||||||
|
* Do not leave trailing whitespace on lines.
|
||||||
|
* Files should end with a final newline.
|
||||||
|
|
||||||
|
Also, adhere to the following not enforced by EditorConfig:
|
||||||
|
|
||||||
|
* Limit lines to 80 characters in length. A few extra (<= 5) is fine if it helps
|
||||||
|
readability, use good judgement.
|
||||||
|
* Use `lf` line endings. (git's `core.autocrlf` setting can help)
|
||||||
|
|
||||||
|
### Git Commit Messages
|
||||||
|
|
||||||
|
Commit messages should adhere to the guidelines in tpope's
|
||||||
|
[A Note About Git Commit Messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)
|
||||||
|
|
||||||
|
In short:
|
||||||
|
|
||||||
|
* Use the imperative mood. ("Fix bug", not "Fixed bug" or "Fixes bug")
|
||||||
|
* Limit the first line to 50 characters or less, followed by a blank line
|
||||||
|
and detail paragraphs (limit detail lines to about 72 characters).
|
||||||
|
* Reference issue numbers or pull requests whenever possible.
|
||||||
|
|
||||||
|
### JavaScript Styleguide
|
||||||
|
|
||||||
|
* Place `module.exports` at or near the top of the file.
|
||||||
|
* Defined functions are hoisted, so it is appropriate to define the
|
||||||
|
function after you export it.
|
||||||
|
* When exporting an object, define it first, then export it, and then add
|
||||||
|
methods or properties.
|
||||||
|
* Do not use ES2015 specific features (for example, do not use `let`, `const`,
|
||||||
|
or `class`).
|
||||||
|
* All callbacks should follow the standard Node.js callback signature.
|
||||||
|
* Your JavaScript should properly pass the linter (`make jslint`).
|
||||||
|
|
||||||
|
### C++ Styleguide
|
||||||
|
|
||||||
|
* Class member variables should be prefixed with `m_`.
|
||||||
|
* Use a comment when pointer ownership has changed hands.
|
||||||
|
* Your C++ should properly pass the `cpplint.py` in the `make lint` test.
|
||||||
|
|
||||||
|
### Specs Styleguide
|
||||||
|
|
||||||
|
* Write all JavaScript tests by using the `mocha` testing framework.
|
||||||
|
* All `mocha` tests should use exports syntax.
|
||||||
|
* All `mocha` test files should be suffixed with `.spec.js` instead of `.js`.
|
||||||
|
* Unit tests should mirror the JavaScript files they test (for example,
|
||||||
|
`lib/client.js` is tested in `test/client.spec.js`).
|
||||||
|
* Unit tests should have no outside service dependencies. Any time a dependency,
|
||||||
|
like Kafka, exists, you should create an end-to-end test.
|
||||||
|
* You may mock a connection in a unit test if it is reliably similar to its real
|
||||||
|
variant.
|
||||||
|
|
||||||
|
### Documentation Styleguide
|
||||||
|
|
||||||
|
* Write all JavaScript documentation in jsdoc-compatible inline comments.
|
||||||
|
* Each docblock should have references to return types and parameters. If an
|
||||||
|
object is a parameter, you should also document any required subproperties.
|
||||||
|
* Use `@see` to reference similar pieces of code.
|
||||||
|
* Use comments to document your code when its intent may be difficult to understand.
|
||||||
|
* All documentation outside of the code should be in Github-compatible markdown.
|
||||||
|
* Make good use of font variations like __bold__ and *italics*.
|
||||||
|
* Use headers and tables of contents when they make sense.
|
||||||
|
|
||||||
|
## Editor
|
||||||
|
|
||||||
|
I began using Visual Studio code to develop on `node-rdkafka`. If you use it you can configure the C++ plugin to resolve the paths needed to inform your intellisense. This is the config file I am using on a mac to resolve the required paths:
|
||||||
|
|
||||||
|
`c_cpp_properties.json`
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"name": "Mac",
|
||||||
|
"includePath": [
|
||||||
|
"${workspaceFolder}/**",
|
||||||
|
"${workspaceFolder}",
|
||||||
|
"${workspaceFolder}/src",
|
||||||
|
"${workspaceFolder}/node_modules/nan",
|
||||||
|
"${workspaceFolder}/deps/librdkafka/src",
|
||||||
|
"${workspaceFolder}/deps/librdkafka/src-cpp",
|
||||||
|
"/usr/local/include/node",
|
||||||
|
"/usr/local/include/node/uv"
|
||||||
|
],
|
||||||
|
"defines": [],
|
||||||
|
"macFrameworkPath": [
|
||||||
|
"/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/System/Library/Frameworks"
|
||||||
|
],
|
||||||
|
"compilerPath": "/usr/bin/clang",
|
||||||
|
"cStandard": "c11",
|
||||||
|
"cppStandard": "c++17",
|
||||||
|
"intelliSenseMode": "clang-x64"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"version": 4
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Debugging
|
||||||
|
|
||||||
|
### Debugging C++
|
||||||
|
|
||||||
|
Use `gdb` for debugging (as shown in the following example).
|
||||||
|
|
||||||
|
```
|
||||||
|
node-gyp rebuild --debug
|
||||||
|
|
||||||
|
gdb node
|
||||||
|
(gdb) set args "path/to/file.js"
|
||||||
|
(gdb) run
|
||||||
|
[output here]
|
||||||
|
```
|
||||||
|
|
||||||
|
You can add breakpoints and so on after that.
|
20
LICENSE.txt
Normal file
20
LICENSE.txt
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||||
|
IN THE SOFTWARE.
|
94
Makefile
Normal file
94
Makefile
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
NODE-GYP ?= node_modules/.bin/node-gyp
|
||||||
|
|
||||||
|
# Sick of changing this. Do a check and try to use python 2 if it doesn't work
|
||||||
|
PYTHON_VERSION_FULL := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1)))
|
||||||
|
PYTHON_VERSION_MAJOR := $(word 1,${PYTHON_VERSION_FULL})
|
||||||
|
|
||||||
|
ifeq ($(PYTHON_VERSION_MAJOR), 2)
|
||||||
|
PYTHON = python
|
||||||
|
else
|
||||||
|
PYTHON = python2
|
||||||
|
endif
|
||||||
|
|
||||||
|
NODE ?= node
|
||||||
|
CPPLINT ?= cpplint.py
|
||||||
|
BUILDTYPE ?= Release
|
||||||
|
TESTS = "test/**/*.js"
|
||||||
|
E2E_TESTS = $(wildcard e2e/*.spec.js)
|
||||||
|
TEST_REPORTER =
|
||||||
|
TEST_OUTPUT =
|
||||||
|
CONFIG_OUTPUTS = \
|
||||||
|
build/bindings.target.mk \
|
||||||
|
build/Makefile \
|
||||||
|
build/binding.Makefile build/config.gypi
|
||||||
|
|
||||||
|
CPPLINT_FILES = $(wildcard src/*.cc src/*.h)
|
||||||
|
CPPLINT_FILTER = -legal/copyright
|
||||||
|
JSLINT_FILES = lib/*.js test/*.js e2e/*.js
|
||||||
|
|
||||||
|
PACKAGE = $(shell node -pe 'require("./package.json").name.split("/")[1]')
|
||||||
|
VERSION = $(shell node -pe 'require("./package.json").version')
|
||||||
|
|
||||||
|
GYPBUILDARGS=
|
||||||
|
ifeq ($(BUILDTYPE),Debug)
|
||||||
|
GYPBUILDARGS=--debug
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: all clean lint test lib docs e2e ghpages check
|
||||||
|
|
||||||
|
all: lint lib test e2e
|
||||||
|
|
||||||
|
lint: cpplint jslint
|
||||||
|
|
||||||
|
cpplint:
|
||||||
|
@$(PYTHON) $(CPPLINT) --filter=$(CPPLINT_FILTER) $(CPPLINT_FILES)
|
||||||
|
|
||||||
|
jslint: node_modules/.dirstamp
|
||||||
|
@./node_modules/.bin/jshint --verbose $(JSLINT_FILES)
|
||||||
|
|
||||||
|
lib: node_modules/.dirstamp $(CONFIG_OUTPUTS)
|
||||||
|
@PYTHONHTTPSVERIFY=0 $(NODE-GYP) build $(GYPBUILDARGS)
|
||||||
|
|
||||||
|
node_modules/.dirstamp: package.json
|
||||||
|
@npm update --loglevel warn
|
||||||
|
@touch $@
|
||||||
|
|
||||||
|
$(CONFIG_OUTPUTS): node_modules/.dirstamp binding.gyp
|
||||||
|
@$(NODE-GYP) configure
|
||||||
|
|
||||||
|
test: node_modules/.dirstamp
|
||||||
|
@./node_modules/.bin/mocha $(TEST_REPORTER) $(TESTS) $(TEST_OUTPUT)
|
||||||
|
|
||||||
|
check: node_modules/.dirstamp
|
||||||
|
@$(NODE) util/test-compile.js
|
||||||
|
|
||||||
|
e2e: $(E2E_TESTS)
|
||||||
|
@./node_modules/.bin/mocha --exit $(TEST_REPORTER) $(E2E_TESTS) $(TEST_OUTPUT)
|
||||||
|
|
||||||
|
define release
|
||||||
|
NEXT_VERSION=$(shell node -pe 'require("semver").inc("$(VERSION)", "$(1)")')
|
||||||
|
node -e "\
|
||||||
|
var j = require('./package.json');\
|
||||||
|
j.version = \"$$NEXT_VERSION\";\
|
||||||
|
var s = JSON.stringify(j, null, 2);\
|
||||||
|
require('fs').writeFileSync('./package.json', s);" && \
|
||||||
|
git commit -m "release $$NEXT_VERSION" -- package.json && \
|
||||||
|
git tag "$$NEXT_VERSION" -m "release $$NEXT_VERSION"
|
||||||
|
endef
|
||||||
|
|
||||||
|
docs: node_modules/.dirstamp
|
||||||
|
@rm -rf docs
|
||||||
|
@./node_modules/jsdoc/jsdoc.js --destination docs \
|
||||||
|
--recurse -R ./README.md \
|
||||||
|
-t "./node_modules/toolkit-jsdoc/" \
|
||||||
|
--tutorials examples ./lib
|
||||||
|
|
||||||
|
gh-pages: node_modules/.dirstamp
|
||||||
|
@./make_docs.sh
|
||||||
|
|
||||||
|
release-patch:
|
||||||
|
@$(call release,patch)
|
||||||
|
|
||||||
|
clean: node_modules/.dirstamp
|
||||||
|
@rm -f deps/librdkafka/config.h
|
||||||
|
@$(NODE-GYP) clean
|
76
bench/consumer-raw-rdkafka.js
Normal file
76
bench/consumer-raw-rdkafka.js
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var count = 0;
|
||||||
|
var total = 0;
|
||||||
|
var store = [];
|
||||||
|
var host = process.argv[2] || 'localhost:9092';
|
||||||
|
var topic = process.argv[3] || 'test';
|
||||||
|
|
||||||
|
var consumer = new Kafka.KafkaConsumer({
|
||||||
|
'metadata.broker.list': host,
|
||||||
|
'group.id': 'node-rdkafka-bench-s',
|
||||||
|
'fetch.wait.max.ms': 100,
|
||||||
|
'fetch.message.max.bytes': 1024 * 1024,
|
||||||
|
'enable.auto.commit': false
|
||||||
|
// paused: true,
|
||||||
|
}, {
|
||||||
|
'auto.offset.reset': 'earliest'
|
||||||
|
});
|
||||||
|
|
||||||
|
var interval;
|
||||||
|
|
||||||
|
consumer.connect()
|
||||||
|
.once('ready', function() {
|
||||||
|
consumer.subscribe([topic]);
|
||||||
|
consumer.consume();
|
||||||
|
})
|
||||||
|
.on('rebalance', function() {
|
||||||
|
console.log('rebalance');
|
||||||
|
})
|
||||||
|
.once('data', function() {
|
||||||
|
interval = setInterval(function() {
|
||||||
|
console.log('%d messages per second', count);
|
||||||
|
if (count > 0) {
|
||||||
|
store.push(count);
|
||||||
|
}
|
||||||
|
count = 0;
|
||||||
|
}, 1000);
|
||||||
|
})
|
||||||
|
.on('data', function(message) {
|
||||||
|
count += 1;
|
||||||
|
total += 1;
|
||||||
|
});
|
||||||
|
|
||||||
|
function shutdown() {
|
||||||
|
clearInterval(interval);
|
||||||
|
|
||||||
|
if (store.length > 0) {
|
||||||
|
var calc = 0;
|
||||||
|
for (var x in store) {
|
||||||
|
calc += store[x];
|
||||||
|
}
|
||||||
|
|
||||||
|
var mps = parseFloat(calc * 1.0/store.length);
|
||||||
|
|
||||||
|
console.log('%d messages per second on average', mps);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
var killTimer = setTimeout(function() {
|
||||||
|
process.exit();
|
||||||
|
}, 5000);
|
||||||
|
|
||||||
|
consumer.disconnect(function() {
|
||||||
|
clearTimeout(killTimer);
|
||||||
|
process.exit();
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
77
bench/consumer-subscribe.js
Normal file
77
bench/consumer-subscribe.js
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var count = 0;
|
||||||
|
var total = 0;
|
||||||
|
var store = [];
|
||||||
|
var host = process.argv[2] || 'localhost:9092';
|
||||||
|
var topic = process.argv[3] || 'test';
|
||||||
|
|
||||||
|
var consumer = new Kafka.KafkaConsumer({
|
||||||
|
'metadata.broker.list': host,
|
||||||
|
'group.id': 'node-rdkafka-bench',
|
||||||
|
'fetch.wait.max.ms': 100,
|
||||||
|
'fetch.message.max.bytes': 1024 * 1024,
|
||||||
|
'enable.auto.commit': false
|
||||||
|
// paused: true,
|
||||||
|
}, {
|
||||||
|
'auto.offset.reset': 'earliest'
|
||||||
|
});
|
||||||
|
|
||||||
|
var interval;
|
||||||
|
|
||||||
|
consumer.connect()
|
||||||
|
.once('ready', function() {
|
||||||
|
consumer.subscribe([topic]);
|
||||||
|
consumer.consume();
|
||||||
|
})
|
||||||
|
.once('data', function() {
|
||||||
|
interval = setInterval(function() {
|
||||||
|
console.log('%d messages per second', count);
|
||||||
|
if (count > 0) {
|
||||||
|
store.push(count);
|
||||||
|
}
|
||||||
|
count = 0;
|
||||||
|
}, 1000);
|
||||||
|
})
|
||||||
|
.on('data', function(message) {
|
||||||
|
count += 1;
|
||||||
|
total += 1;
|
||||||
|
});
|
||||||
|
|
||||||
|
process.once('SIGTERM', shutdown);
|
||||||
|
process.once('SIGINT', shutdown);
|
||||||
|
process.once('SIGHUP', shutdown);
|
||||||
|
|
||||||
|
function shutdown() {
|
||||||
|
clearInterval(interval);
|
||||||
|
|
||||||
|
if (store.length > 0) {
|
||||||
|
var calc = 0;
|
||||||
|
for (var x in store) {
|
||||||
|
calc += store[x];
|
||||||
|
}
|
||||||
|
|
||||||
|
var mps = parseFloat(calc * 1.0/store.length);
|
||||||
|
|
||||||
|
console.log('%d messages per second on average', mps);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
var killTimer = setTimeout(function() {
|
||||||
|
process.exit();
|
||||||
|
}, 5000);
|
||||||
|
|
||||||
|
consumer.disconnect(function() {
|
||||||
|
clearTimeout(killTimer);
|
||||||
|
process.exit();
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
100
bench/kafka-consumer-stream.js
Normal file
100
bench/kafka-consumer-stream.js
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Writable = require('stream').Writable;
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var count = 0;
|
||||||
|
var total = 0;
|
||||||
|
var store = [];
|
||||||
|
var host = process.argv[2] || 'localhost:9092';
|
||||||
|
var topic = process.argv[3] || 'test';
|
||||||
|
|
||||||
|
var stream = Kafka.createReadStream({
|
||||||
|
'metadata.broker.list': host,
|
||||||
|
'group.id': 'node-rdkafka-benchs',
|
||||||
|
'fetch.wait.max.ms': 100,
|
||||||
|
'fetch.message.max.bytes': 1024 * 1024,
|
||||||
|
'enable.auto.commit': false
|
||||||
|
// paused: true,
|
||||||
|
}, {
|
||||||
|
'auto.offset.reset': 'earliest'
|
||||||
|
}, {
|
||||||
|
fetchSize: 16,
|
||||||
|
topics: [topic]
|
||||||
|
});
|
||||||
|
|
||||||
|
// Track how many messages we see per second
|
||||||
|
var interval;
|
||||||
|
|
||||||
|
var isShuttingDown = false;
|
||||||
|
|
||||||
|
stream
|
||||||
|
.on('error', function(err) {
|
||||||
|
console.log('Shutting down due to error');
|
||||||
|
console.log(err.stack);
|
||||||
|
shutdown();
|
||||||
|
})
|
||||||
|
.once('data', function(d) {
|
||||||
|
interval = setInterval(function() {
|
||||||
|
if (isShuttingDown) {
|
||||||
|
clearInterval(interval);
|
||||||
|
}
|
||||||
|
console.log('%d messages per second', count);
|
||||||
|
if (count > 0) {
|
||||||
|
// Don't store ones when we didn't get data i guess?
|
||||||
|
store.push(count);
|
||||||
|
// setTimeout(shutdown, 500);
|
||||||
|
}
|
||||||
|
count = 0;
|
||||||
|
}, 1000).unref();
|
||||||
|
})
|
||||||
|
.on('end', function() {
|
||||||
|
// Can be called more than once without issue because of guard var
|
||||||
|
console.log('Shutting down due to stream end');
|
||||||
|
shutdown();
|
||||||
|
})
|
||||||
|
.pipe(new Writable({
|
||||||
|
objectMode: true,
|
||||||
|
write: function(message, encoding, cb) {
|
||||||
|
count += 1;
|
||||||
|
total += 1;
|
||||||
|
setImmediate(cb);
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
|
||||||
|
process.once('SIGTERM', shutdown);
|
||||||
|
process.once('SIGINT', shutdown);
|
||||||
|
process.once('SIGHUP', shutdown);
|
||||||
|
|
||||||
|
function shutdown() {
|
||||||
|
if (isShuttingDown) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
clearInterval(interval);
|
||||||
|
isShuttingDown = true;
|
||||||
|
if (store.length > 0) {
|
||||||
|
var calc = 0;
|
||||||
|
for (var x in store) {
|
||||||
|
calc += store[x];
|
||||||
|
}
|
||||||
|
|
||||||
|
var mps = parseFloat(calc * 1.0/store.length);
|
||||||
|
|
||||||
|
console.log('%d messages per second on average', mps);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy the stream
|
||||||
|
stream.destroy();
|
||||||
|
|
||||||
|
stream.once('end', function() {
|
||||||
|
console.log('total: %d', total);
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
125
bench/producer-raw-rdkafka.js
Normal file
125
bench/producer-raw-rdkafka.js
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var crypto = require('crypto');
|
||||||
|
var count = 0;
|
||||||
|
var total = 0;
|
||||||
|
var totalComplete = 0;
|
||||||
|
var verifiedComplete = 0;
|
||||||
|
var errors = 0;
|
||||||
|
var store = [];
|
||||||
|
var started;
|
||||||
|
var done = false;
|
||||||
|
var host = process.argv[2] || '127.0.0.1:9092';
|
||||||
|
var topicName = process.argv[3] || 'test';
|
||||||
|
var compression = process.argv[4] || 'gzip';
|
||||||
|
var MAX = process.argv[5] || 10000000;
|
||||||
|
|
||||||
|
var producer = new Kafka.Producer({
|
||||||
|
'metadata.broker.list': host,
|
||||||
|
'group.id': 'node-rdkafka-bench',
|
||||||
|
'compression.codec': compression,
|
||||||
|
'retry.backoff.ms': 200,
|
||||||
|
'message.send.max.retries': 10,
|
||||||
|
'socket.keepalive.enable': true,
|
||||||
|
'queue.buffering.max.messages': 100000,
|
||||||
|
'queue.buffering.max.ms': 1000,
|
||||||
|
'batch.num.messages': 1000
|
||||||
|
});
|
||||||
|
|
||||||
|
// Track how many messages we see per second
|
||||||
|
var interval;
|
||||||
|
var ok = true;
|
||||||
|
|
||||||
|
function getTimer() {
|
||||||
|
if (!interval) {
|
||||||
|
interval = setTimeout(function() {
|
||||||
|
interval = false;
|
||||||
|
if (!done) {
|
||||||
|
console.log('%d messages per sent second', count);
|
||||||
|
store.push(count);
|
||||||
|
count = 0;
|
||||||
|
getTimer();
|
||||||
|
|
||||||
|
} else {
|
||||||
|
console.log('%d messages remaining sent in last batch <1000ms', count);
|
||||||
|
}
|
||||||
|
}, 1000);
|
||||||
|
}
|
||||||
|
|
||||||
|
return interval;
|
||||||
|
}
|
||||||
|
|
||||||
|
var t;
|
||||||
|
|
||||||
|
crypto.randomBytes(4096, function(ex, buffer) {
|
||||||
|
|
||||||
|
producer.connect()
|
||||||
|
.on('ready', function() {
|
||||||
|
getTimer();
|
||||||
|
|
||||||
|
started = new Date().getTime();
|
||||||
|
|
||||||
|
var sendMessage = function() {
|
||||||
|
try {
|
||||||
|
var errorCode = producer.produce(topicName, null, buffer, null);
|
||||||
|
verifiedComplete += 1;
|
||||||
|
} catch (e) {
|
||||||
|
console.error(e);
|
||||||
|
errors++;
|
||||||
|
}
|
||||||
|
|
||||||
|
count += 1;
|
||||||
|
totalComplete += 1;
|
||||||
|
if (totalComplete === MAX) {
|
||||||
|
shutdown();
|
||||||
|
}
|
||||||
|
if (total < MAX) {
|
||||||
|
total += 1;
|
||||||
|
|
||||||
|
// This is 100% sync so we need to setImmediate to give it time
|
||||||
|
// to breathe.
|
||||||
|
setImmediate(sendMessage);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
sendMessage();
|
||||||
|
|
||||||
|
})
|
||||||
|
.on('event.error', function(err) {
|
||||||
|
console.error(err);
|
||||||
|
process.exit(1);
|
||||||
|
})
|
||||||
|
.on('disconnected', shutdown);
|
||||||
|
|
||||||
|
});
|
||||||
|
|
||||||
|
function shutdown(e) {
|
||||||
|
done = true;
|
||||||
|
|
||||||
|
clearInterval(interval);
|
||||||
|
|
||||||
|
var killTimer = setTimeout(function() {
|
||||||
|
process.exit();
|
||||||
|
}, 5000);
|
||||||
|
|
||||||
|
producer.disconnect(function() {
|
||||||
|
clearTimeout(killTimer);
|
||||||
|
var ended = new Date().getTime();
|
||||||
|
var elapsed = ended - started;
|
||||||
|
|
||||||
|
// console.log('Ended %s', ended);
|
||||||
|
console.log('total: %d messages over %d ms', total, elapsed);
|
||||||
|
|
||||||
|
console.log('%d messages / second', parseInt(total / (elapsed / 1000)));
|
||||||
|
process.exit();
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
117
bench/producer-rdkafka.js
Normal file
117
bench/producer-rdkafka.js
Normal file
@ -0,0 +1,117 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var crypto = require('crypto');
|
||||||
|
var count = 0;
|
||||||
|
var total = 0;
|
||||||
|
var totalComplete = 0;
|
||||||
|
var store = [];
|
||||||
|
var host = process.argv[2] || '127.0.0.1:9092';
|
||||||
|
var topicName = process.argv[3] || 'test';
|
||||||
|
var compression = process.argv[4] || 'gzip';
|
||||||
|
var MAX = process.argv[5] || 1000000;
|
||||||
|
|
||||||
|
var stream = Kafka.Producer.createWriteStream({
|
||||||
|
'metadata.broker.list': host,
|
||||||
|
'group.id': 'node-rdkafka-bench',
|
||||||
|
'compression.codec': compression,
|
||||||
|
'retry.backoff.ms': 200,
|
||||||
|
'message.send.max.retries': 10,
|
||||||
|
'socket.keepalive.enable': true,
|
||||||
|
'queue.buffering.max.messages': 100000,
|
||||||
|
'queue.buffering.max.ms': 1000,
|
||||||
|
'batch.num.messages': 1000,
|
||||||
|
}, {}, {
|
||||||
|
topic: topicName,
|
||||||
|
pollInterval: 20
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.on('error', function(e) {
|
||||||
|
console.log(e);
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Track how many messages we see per second
|
||||||
|
var interval;
|
||||||
|
var done = false;
|
||||||
|
|
||||||
|
function log() {
|
||||||
|
console.log('%d messages per sent second', count);
|
||||||
|
store.push(count);
|
||||||
|
count = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
crypto.randomBytes(4096, function(ex, buffer) {
|
||||||
|
|
||||||
|
var x = function(e) {
|
||||||
|
if (e) {
|
||||||
|
console.error(e);
|
||||||
|
}
|
||||||
|
count += 1;
|
||||||
|
totalComplete += 1;
|
||||||
|
if (totalComplete >= MAX && !done) {
|
||||||
|
done = true;
|
||||||
|
clearInterval(interval);
|
||||||
|
setTimeout(shutdown, 5000);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
function write() {
|
||||||
|
if (!stream.write(buffer, 'base64', x)) {
|
||||||
|
return stream.once('drain', write);
|
||||||
|
} else {
|
||||||
|
total++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (total < MAX) {
|
||||||
|
// we are not done
|
||||||
|
setImmediate(write);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
write();
|
||||||
|
interval = setInterval(log, 1000);
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
console.log(err);
|
||||||
|
});
|
||||||
|
// stream.on('end', shutdown);
|
||||||
|
|
||||||
|
});
|
||||||
|
|
||||||
|
|
||||||
|
process.once('SIGTERM', shutdown);
|
||||||
|
process.once('SIGINT', shutdown);
|
||||||
|
process.once('SIGHUP', shutdown);
|
||||||
|
|
||||||
|
function shutdown() {
|
||||||
|
|
||||||
|
if (store.length > 0) {
|
||||||
|
var calc = 0;
|
||||||
|
for (var x in store) {
|
||||||
|
calc += store[x];
|
||||||
|
}
|
||||||
|
|
||||||
|
var mps = parseFloat(calc * 1.0/store.length);
|
||||||
|
|
||||||
|
console.log('%d messages per second on average', mps);
|
||||||
|
console.log('%d messages total', total);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
clearInterval(interval);
|
||||||
|
|
||||||
|
stream.end();
|
||||||
|
|
||||||
|
stream.on('close', function() {
|
||||||
|
console.log('total: %d', total);
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
38
bench/seed.sh
Executable file
38
bench/seed.sh
Executable file
@ -0,0 +1,38 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
kafka_root=${KAFKA_ROOT:-/opt/kafka}
|
||||||
|
# Generate and insert some messages
|
||||||
|
|
||||||
|
OS=$(uname -s)
|
||||||
|
|
||||||
|
function initializeTopic {
|
||||||
|
topic=$1
|
||||||
|
host=$2
|
||||||
|
msg_size=$3
|
||||||
|
batch_size=$4
|
||||||
|
batch_count=$5
|
||||||
|
|
||||||
|
if [ $host == "localhost:9092" ]; then
|
||||||
|
${kafka_root}/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
|
||||||
|
--replication-factor 1 --partitions 1 --topic ${topic}
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Generating messages (size: ${msg_size})"
|
||||||
|
: > /tmp/msgs # Truncate /tmp/msgs
|
||||||
|
for i in $(seq 1 ${batch_size}); do
|
||||||
|
if [ $OS == 'Darwin' ]; then
|
||||||
|
printf %s\\n "$(head -c${msg_size} /dev/urandom | base64)" >> /tmp/msgs
|
||||||
|
else
|
||||||
|
printf %s\\n "$(head --bytes=${msg_size} /dev/urandom | base64 --wrap=0)" >> /tmp/msgs
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "Done generating messages"
|
||||||
|
|
||||||
|
for i in $(seq 1 ${batch_count}); do
|
||||||
|
echo "Adding $(wc -l /tmp/msgs) messages to topic ${topic}"
|
||||||
|
"${kafka_root}/bin/kafka-console-producer.sh" \
|
||||||
|
--broker-list ${host} --topic ${topic} < /tmp/msgs
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
initializeTopic "librdtesting-01" "localhost:9092" "4096" "5000" "2000"
|
154
binding.gyp
Normal file
154
binding.gyp
Normal file
@ -0,0 +1,154 @@
|
|||||||
|
{
|
||||||
|
"variables": {
|
||||||
|
# may be redefined in command line on configuration stage
|
||||||
|
# "BUILD_LIBRDKAFKA%": "<!(echo ${BUILD_LIBRDKAFKA:-1})"
|
||||||
|
"BUILD_LIBRDKAFKA%": "<!(node ./util/get-env.js BUILD_LIBRDKAFKA 1)",
|
||||||
|
},
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"target_name": "node-librdkafka",
|
||||||
|
'sources': [
|
||||||
|
'src/binding.cc',
|
||||||
|
'src/callbacks.cc',
|
||||||
|
'src/common.cc',
|
||||||
|
'src/config.cc',
|
||||||
|
'src/connection.cc',
|
||||||
|
'src/errors.cc',
|
||||||
|
'src/kafka-consumer.cc',
|
||||||
|
'src/producer.cc',
|
||||||
|
'src/topic.cc',
|
||||||
|
'src/workers.cc',
|
||||||
|
'src/admin.cc'
|
||||||
|
],
|
||||||
|
"include_dirs": [
|
||||||
|
"<!(node -e \"require('nan')\")",
|
||||||
|
"<(module_root_dir)/"
|
||||||
|
],
|
||||||
|
'conditions': [
|
||||||
|
[
|
||||||
|
'OS=="win"',
|
||||||
|
{
|
||||||
|
'actions': [
|
||||||
|
{
|
||||||
|
'action_name': 'nuget_librdkafka_download',
|
||||||
|
'inputs': [
|
||||||
|
'deps/windows-install.py'
|
||||||
|
],
|
||||||
|
'outputs': [
|
||||||
|
'deps/precompiled/librdkafka.lib',
|
||||||
|
'deps/precompiled/librdkafkacpp.lib'
|
||||||
|
],
|
||||||
|
'message': 'Getting librdkafka from nuget',
|
||||||
|
'action': ['python', '<@(_inputs)']
|
||||||
|
}
|
||||||
|
],
|
||||||
|
'cflags_cc' : [
|
||||||
|
'-std=c++14'
|
||||||
|
],
|
||||||
|
'msvs_settings': {
|
||||||
|
'VCLinkerTool': {
|
||||||
|
'AdditionalDependencies': [
|
||||||
|
'librdkafka.lib',
|
||||||
|
'librdkafkacpp.lib'
|
||||||
|
],
|
||||||
|
'AdditionalLibraryDirectories': [
|
||||||
|
'../deps/precompiled/'
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'VCCLCompilerTool': {
|
||||||
|
'AdditionalOptions': [
|
||||||
|
'/GR'
|
||||||
|
],
|
||||||
|
'AdditionalUsingDirectories': [
|
||||||
|
'deps/precompiled/'
|
||||||
|
],
|
||||||
|
'AdditionalIncludeDirectories': [
|
||||||
|
'deps/librdkafka/src',
|
||||||
|
'deps/librdkafka/src-cpp'
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'include_dirs': [
|
||||||
|
'deps/include'
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'conditions': [
|
||||||
|
[ "<(BUILD_LIBRDKAFKA)==1",
|
||||||
|
{
|
||||||
|
"dependencies": [
|
||||||
|
"deps/librdkafka.gyp:librdkafka"
|
||||||
|
],
|
||||||
|
"include_dirs": [
|
||||||
|
"deps/librdkafka/src",
|
||||||
|
"deps/librdkafka/src-cpp"
|
||||||
|
],
|
||||||
|
'conditions': [
|
||||||
|
[
|
||||||
|
'OS=="linux"',
|
||||||
|
{
|
||||||
|
"libraries": [
|
||||||
|
"../build/deps/librdkafka.so",
|
||||||
|
"../build/deps/librdkafka++.so",
|
||||||
|
"-Wl,-rpath='$$ORIGIN/../deps'",
|
||||||
|
],
|
||||||
|
}
|
||||||
|
],
|
||||||
|
[
|
||||||
|
'OS=="mac"',
|
||||||
|
{
|
||||||
|
"libraries": [
|
||||||
|
"../build/deps/librdkafka.dylib",
|
||||||
|
"../build/deps/librdkafka++.dylib",
|
||||||
|
],
|
||||||
|
}
|
||||||
|
]
|
||||||
|
],
|
||||||
|
},
|
||||||
|
# Else link against globally installed rdkafka and use
|
||||||
|
# globally installed headers. On Debian, you should
|
||||||
|
# install the librdkafka1, librdkafka++1, and librdkafka-dev
|
||||||
|
# .deb packages.
|
||||||
|
{
|
||||||
|
"libraries": ["-lrdkafka", "-lrdkafka++"],
|
||||||
|
"include_dirs": [
|
||||||
|
"/usr/include/librdkafka",
|
||||||
|
"/usr/local/include/librdkafka",
|
||||||
|
"/opt/include/librdkafka",
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
[
|
||||||
|
'OS=="linux"',
|
||||||
|
{
|
||||||
|
'cflags_cc' : [
|
||||||
|
'-std=c++14'
|
||||||
|
],
|
||||||
|
'cflags_cc!': [
|
||||||
|
'-fno-rtti'
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
[
|
||||||
|
'OS=="mac"',
|
||||||
|
{
|
||||||
|
'xcode_settings': {
|
||||||
|
'MACOSX_DEPLOYMENT_TARGET': '10.11',
|
||||||
|
'GCC_ENABLE_CPP_RTTI': 'YES',
|
||||||
|
'OTHER_LDFLAGS': [
|
||||||
|
'-L/usr/local/opt/openssl/lib'
|
||||||
|
],
|
||||||
|
'OTHER_CPLUSPLUSFLAGS': [
|
||||||
|
'-I/usr/local/opt/openssl/include',
|
||||||
|
'-std=c++14'
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
67
ci/checks/librdkafka-correct-version.js
Normal file
67
ci/checks/librdkafka-correct-version.js
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
const path = require('path');
|
||||||
|
const fs = require('fs');
|
||||||
|
|
||||||
|
const root = path.resolve(__dirname, '..', '..');
|
||||||
|
const pjsPath = path.join(root, 'package.json');
|
||||||
|
|
||||||
|
const librdkafkaPath = path.resolve(root, 'deps', 'librdkafka');
|
||||||
|
const pjs = require(pjsPath);
|
||||||
|
|
||||||
|
const majorMask = 0xff000000;
|
||||||
|
const minorMask = 0x00ff0000;
|
||||||
|
const patchMask = 0x0000ff00;
|
||||||
|
const revMask = 0x000000ff;
|
||||||
|
|
||||||
|
// Read the header file
|
||||||
|
const headerFileLines = fs.readFileSync(path.resolve(librdkafkaPath, 'src', 'rdkafka.h')).toString().split('\n');
|
||||||
|
const precompilerDefinitions = headerFileLines.filter((line) => line.startsWith('#def'));
|
||||||
|
const definedLines = precompilerDefinitions.map(definedLine => {
|
||||||
|
const content = definedLine.split(' ').filter(v => v != '');
|
||||||
|
|
||||||
|
return {
|
||||||
|
command: content[0],
|
||||||
|
key: content[1],
|
||||||
|
value: content[2]
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
const defines = {};
|
||||||
|
|
||||||
|
for (let item of definedLines) {
|
||||||
|
if (item.command == '#define') {
|
||||||
|
defines[item.key] = item.value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseLibrdkafkaVersion(version) {
|
||||||
|
const intRepresentation = parseInt(version);
|
||||||
|
|
||||||
|
const major = (intRepresentation & majorMask) >> (8 * 3);
|
||||||
|
const minor = (intRepresentation & minorMask) >> (8 * 2);
|
||||||
|
const patch = (intRepresentation & patchMask) >> (8 * 1);
|
||||||
|
const rev = (intRepresentation & revMask) >> (8 * 0);
|
||||||
|
|
||||||
|
return {
|
||||||
|
major,
|
||||||
|
minor,
|
||||||
|
patch,
|
||||||
|
rev
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function versionAsString(version) {
|
||||||
|
return [
|
||||||
|
version.major,
|
||||||
|
version.minor,
|
||||||
|
version.patch,
|
||||||
|
version.rev === 255 ? null : version.rev,
|
||||||
|
].filter(v => v != null).join('.');
|
||||||
|
}
|
||||||
|
|
||||||
|
const librdkafkaVersion = parseLibrdkafkaVersion(defines.RD_KAFKA_VERSION);
|
||||||
|
const versionString = versionAsString(librdkafkaVersion);
|
||||||
|
|
||||||
|
if (pjs.librdkafka !== versionString) {
|
||||||
|
console.error(`Librdkafka version of ${versionString} does not match package json: ${pjs.librdkafka}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
13
ci/checks/librdkafka-exists.js
Normal file
13
ci/checks/librdkafka-exists.js
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
const path = require('path');
|
||||||
|
const fs = require('fs');
|
||||||
|
|
||||||
|
const root = path.resolve(__dirname, '..', '..');
|
||||||
|
const librdkafkaPath = path.resolve(root, 'deps', 'librdkafka');
|
||||||
|
|
||||||
|
// Ensure librdkafka is in the deps directory - this makes sure we don't accidentally
|
||||||
|
// publish on a non recursive clone :)
|
||||||
|
|
||||||
|
if (!fs.existsSync(librdkafkaPath)) {
|
||||||
|
console.error(`Could not find librdkafka at path ${librdkafkaPath}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
193
ci/librdkafka-defs-generator.js
Normal file
193
ci/librdkafka-defs-generator.js
Normal file
@ -0,0 +1,193 @@
|
|||||||
|
const fs = require('fs');
|
||||||
|
const path = require('path');
|
||||||
|
|
||||||
|
const LIBRDKAFKA_VERSION = require('../package.json').librdkafka;
|
||||||
|
const LIBRDKAFKA_DIR = path.resolve(__dirname, '../deps/librdkafka/');
|
||||||
|
|
||||||
|
function getHeader(file) {
|
||||||
|
return `// ====== Generated from librdkafka ${LIBRDKAFKA_VERSION} file ${file} ======`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function readLibRDKafkaFile(file) {
|
||||||
|
return fs.readFileSync(path.resolve(LIBRDKAFKA_DIR, file)).toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractConfigItems(configStr) {
|
||||||
|
const [_header, config] = configStr.split(/-{5,}\|.*/);
|
||||||
|
|
||||||
|
const re = /(.*?)\|(.*?)\|(.*?)\|(.*?)\|(.*?)\|(.*)/g;
|
||||||
|
|
||||||
|
const configItems = [];
|
||||||
|
|
||||||
|
let m;
|
||||||
|
do {
|
||||||
|
m = re.exec(config);
|
||||||
|
if (m) {
|
||||||
|
const [
|
||||||
|
_fullString,
|
||||||
|
property,
|
||||||
|
consumerOrProducer,
|
||||||
|
range,
|
||||||
|
defaultValue,
|
||||||
|
importance,
|
||||||
|
descriptionWithType,
|
||||||
|
] = m.map(el => (typeof el === 'string' ? el.trim() : el));
|
||||||
|
|
||||||
|
const splitDescriptionRe = /(.*?)\s*?<br>.*?:\s.*?(.*?)\*/;
|
||||||
|
const [_, description, rawType] = splitDescriptionRe.exec(descriptionWithType);
|
||||||
|
|
||||||
|
configItems.push({
|
||||||
|
property,
|
||||||
|
consumerOrProducer,
|
||||||
|
range,
|
||||||
|
defaultValue,
|
||||||
|
importance,
|
||||||
|
description,
|
||||||
|
rawType,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} while (m);
|
||||||
|
|
||||||
|
return configItems.map(processItem);
|
||||||
|
}
|
||||||
|
|
||||||
|
function processItem(configItem) {
|
||||||
|
// These items are overwritten by node-rdkafka
|
||||||
|
switch (configItem.property) {
|
||||||
|
case 'dr_msg_cb':
|
||||||
|
return { ...configItem, type: 'boolean' };
|
||||||
|
case 'dr_cb':
|
||||||
|
return { ...configItem, type: 'boolean | Function' };
|
||||||
|
case 'rebalance_cb':
|
||||||
|
return { ...configItem, type: 'boolean | Function' };
|
||||||
|
case 'offset_commit_cb':
|
||||||
|
return { ...configItem, type: 'boolean | Function' };
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (configItem.rawType) {
|
||||||
|
case 'integer':
|
||||||
|
return { ...configItem, type: 'number' };
|
||||||
|
case 'boolean':
|
||||||
|
return { ...configItem, type: 'boolean' };
|
||||||
|
case 'string':
|
||||||
|
case 'CSV flags':
|
||||||
|
return { ...configItem, type: 'string' };
|
||||||
|
case 'enum value':
|
||||||
|
return {
|
||||||
|
...configItem,
|
||||||
|
type: configItem.range
|
||||||
|
.split(',')
|
||||||
|
.map(str => `'${str.trim()}'`)
|
||||||
|
.join(' | '),
|
||||||
|
};
|
||||||
|
default:
|
||||||
|
return { ...configItem, type: 'any' };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateInterface(interfaceDef, configItems) {
|
||||||
|
const fields = configItems
|
||||||
|
.map(item =>
|
||||||
|
[
|
||||||
|
`/**`,
|
||||||
|
` * ${item.description}`,
|
||||||
|
...(item.defaultValue ? [` *`, ` * @default ${item.defaultValue}`] : []),
|
||||||
|
` */`,
|
||||||
|
`"${item.property}"?: ${item.type};`,
|
||||||
|
]
|
||||||
|
.map(row => ` ${row}`)
|
||||||
|
.join('\n')
|
||||||
|
)
|
||||||
|
.join('\n\n');
|
||||||
|
|
||||||
|
return `export interface ` + interfaceDef + ' {\n' + fields + '\n}';
|
||||||
|
}
|
||||||
|
|
||||||
|
function addSpecialGlobalProps(globalProps) {
|
||||||
|
globalProps.push({
|
||||||
|
"property": "event_cb",
|
||||||
|
"consumerOrProducer": "*",
|
||||||
|
"range": "",
|
||||||
|
"defaultValue": "true",
|
||||||
|
"importance": "low",
|
||||||
|
"description": "Enables or disables `event.*` emitting.",
|
||||||
|
"rawType": "boolean",
|
||||||
|
"type": "boolean"
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateConfigDTS(file) {
|
||||||
|
const configuration = readLibRDKafkaFile(file);
|
||||||
|
const [globalStr, topicStr] = configuration.split('Topic configuration properties');
|
||||||
|
|
||||||
|
const [globalProps, topicProps] = [extractConfigItems(globalStr), extractConfigItems(topicStr)];
|
||||||
|
|
||||||
|
addSpecialGlobalProps(globalProps);
|
||||||
|
|
||||||
|
const [globalSharedProps, producerGlobalProps, consumerGlobalProps] = [
|
||||||
|
globalProps.filter(i => i.consumerOrProducer === '*'),
|
||||||
|
globalProps.filter(i => i.consumerOrProducer === 'P'),
|
||||||
|
globalProps.filter(i => i.consumerOrProducer === 'C'),
|
||||||
|
];
|
||||||
|
|
||||||
|
const [topicSharedProps, producerTopicProps, consumerTopicProps] = [
|
||||||
|
topicProps.filter(i => i.consumerOrProducer === '*'),
|
||||||
|
topicProps.filter(i => i.consumerOrProducer === 'P'),
|
||||||
|
topicProps.filter(i => i.consumerOrProducer === 'C'),
|
||||||
|
];
|
||||||
|
|
||||||
|
let output = `${getHeader(file)}
|
||||||
|
// Code that generated this is a derivative work of the code from Nam Nguyen
|
||||||
|
// https://gist.github.com/ntgn81/066c2c8ec5b4238f85d1e9168a04e3fb
|
||||||
|
|
||||||
|
`;
|
||||||
|
|
||||||
|
output += [
|
||||||
|
generateInterface('GlobalConfig', globalSharedProps),
|
||||||
|
generateInterface('ProducerGlobalConfig extends GlobalConfig', producerGlobalProps),
|
||||||
|
generateInterface('ConsumerGlobalConfig extends GlobalConfig', consumerGlobalProps),
|
||||||
|
generateInterface('TopicConfig', topicSharedProps),
|
||||||
|
generateInterface('ProducerTopicConfig extends TopicConfig', producerTopicProps),
|
||||||
|
generateInterface('ConsumerTopicConfig extends TopicConfig', consumerTopicProps),
|
||||||
|
].join('\n\n');
|
||||||
|
|
||||||
|
fs.writeFileSync(path.resolve(__dirname, '../config.d.ts'), output);
|
||||||
|
}
|
||||||
|
|
||||||
|
function updateErrorDefinitions(file) {
|
||||||
|
const rdkafkacpp_h = readLibRDKafkaFile(file);
|
||||||
|
const m = /enum ErrorCode {([^}]+)}/g.exec(rdkafkacpp_h);
|
||||||
|
if (!m) {
|
||||||
|
throw new Error(`Can't read rdkafkacpp.h file`)
|
||||||
|
}
|
||||||
|
const body = m[1]
|
||||||
|
.replace(/(\t)|( +)/g, ' ')
|
||||||
|
.replace(/\n\n/g, '\n')
|
||||||
|
.replace(/\s+=\s+/g, ': ')
|
||||||
|
.replace(/[\t ]*#define +(\w+) +(\w+)/g, (_, define, original) => {
|
||||||
|
const value = new RegExp(`${original}\\s+=\\s+(\\d+)`).exec(m[1])[1];
|
||||||
|
return ` ${define}: ${value},`;
|
||||||
|
})
|
||||||
|
|
||||||
|
// validate body
|
||||||
|
const emptyCheck = body
|
||||||
|
.replace(/(( \/\*)|( ?\*)).*/g, '')
|
||||||
|
.replace(/ ERR_\w+: -?\d+,?\n/g, '')
|
||||||
|
.trim()
|
||||||
|
if (emptyCheck !== '') {
|
||||||
|
throw new Error(`Fail to parse ${file}. It contains these extra details:\n${emptyCheck}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const error_js_file = path.resolve(__dirname, '../lib/error.js');
|
||||||
|
const error_js = fs.readFileSync(error_js_file)
|
||||||
|
.toString()
|
||||||
|
.replace(/(\/\/.*\n)?LibrdKafkaError.codes = {[^}]+/g, `${getHeader(file)}\nLibrdKafkaError.codes = {\n${body}`)
|
||||||
|
|
||||||
|
fs.writeFileSync(error_js_file, error_js);
|
||||||
|
fs.writeFileSync(path.resolve(__dirname, '../errors.d.ts'), `${getHeader(file)}\nexport const CODES: { ERRORS: {${body.replace(/[ \.]*(\*\/\n \w+: )(-?\d+),?/g, ' (**$2**) $1number,')}}}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
(async function updateTypeDefs() {
|
||||||
|
generateConfigDTS('CONFIGURATION.md');
|
||||||
|
updateErrorDefinitions('src-cpp/rdkafkacpp.h');
|
||||||
|
})()
|
4
ci/prepublish.js
Normal file
4
ci/prepublish.js
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
require('./checks/librdkafka-exists');
|
||||||
|
require('./checks/librdkafka-correct-version');
|
||||||
|
require('./librdkafka-defs-generator.js');
|
||||||
|
require('./update-version');
|
119
ci/update-version.js
Normal file
119
ci/update-version.js
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
const path = require('path');
|
||||||
|
const semver = require('semver');
|
||||||
|
const { spawn } = require('child_process');
|
||||||
|
const fs = require('fs');
|
||||||
|
|
||||||
|
const root = path.resolve(__dirname, '..');
|
||||||
|
const pjsPath = path.resolve(root, 'package.json');
|
||||||
|
const pjs = require(pjsPath);
|
||||||
|
|
||||||
|
function parseVersion(tag) {
|
||||||
|
const { major, minor, prerelease, patch } = semver.parse(tag);
|
||||||
|
|
||||||
|
// Describe will give is commits since last tag
|
||||||
|
const [ commitsSinceTag, hash ] = prerelease[0] ? prerelease[0].split('-') : [
|
||||||
|
1,
|
||||||
|
process.env.TRAVIS_COMMIT || ''
|
||||||
|
];
|
||||||
|
|
||||||
|
return {
|
||||||
|
major,
|
||||||
|
minor,
|
||||||
|
prerelease,
|
||||||
|
patch,
|
||||||
|
commit: commitsSinceTag - 1,
|
||||||
|
hash
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function getCommandOutput(command, args, cb) {
|
||||||
|
let output = '';
|
||||||
|
|
||||||
|
const cmd = spawn(command, args);
|
||||||
|
|
||||||
|
cmd.stdout.on('data', (data) => {
|
||||||
|
output += data;
|
||||||
|
});
|
||||||
|
|
||||||
|
cmd.on('close', (code) => {
|
||||||
|
if (code != 0) {
|
||||||
|
cb(new Error(`Command returned unsuccessful code: ${code}`));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cb(null, output.trim());
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function getVersion(cb) {
|
||||||
|
// https://docs.travis-ci.com/user/environment-variables/
|
||||||
|
if (process.env.TRAVIS_TAG) {
|
||||||
|
setImmediate(() => cb(null, parseVersion(process.env.TRAVIS_TAG.trim())));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
getCommandOutput('git', ['describe', '--tags'], (err, result) => {
|
||||||
|
if (err) {
|
||||||
|
cb(err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cb(null, parseVersion(result.trim()));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function getBranch(cb) {
|
||||||
|
if (process.env.TRAVIS_TAG) {
|
||||||
|
// TRAVIS_BRANCH matches TRAVIS_TAG when TRAVIS_TAG is set
|
||||||
|
// "git branch --contains tags/TRAVIS_TAG" doesn't work on travis so we have to assume 'master'
|
||||||
|
setImmediate(() => cb(null, 'master'));
|
||||||
|
return;
|
||||||
|
} else if (process.env.TRAVIS_BRANCH) {
|
||||||
|
setImmediate(() => cb(null, process.env.TRAVIS_BRANCH.trim()));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
getCommandOutput('git', ['rev-parse', '--abbrev-ref', 'HEAD'], (err, result) => {
|
||||||
|
if (err) {
|
||||||
|
cb(err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cb(null, result.trim());
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function getPackageVersion(tag, branch) {
|
||||||
|
let baseVersion = `v${tag.major}.${tag.minor}.${tag.patch}`;
|
||||||
|
|
||||||
|
if (tag.commit === 0 && branch === 'master') {
|
||||||
|
return baseVersion;
|
||||||
|
}
|
||||||
|
|
||||||
|
baseVersion += '-';
|
||||||
|
|
||||||
|
if (branch !== 'master') {
|
||||||
|
baseVersion += (tag.commit + 1 + '.' + branch);
|
||||||
|
} else {
|
||||||
|
baseVersion += (tag.commit + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return baseVersion;
|
||||||
|
}
|
||||||
|
|
||||||
|
getVersion((err, tag) => {
|
||||||
|
if (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
|
||||||
|
getBranch((err, branch) => {
|
||||||
|
if (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
|
||||||
|
pjs.version = getPackageVersion(tag, branch);
|
||||||
|
|
||||||
|
fs.writeFileSync(pjsPath, JSON.stringify(pjs, null, 2));
|
||||||
|
})
|
||||||
|
|
||||||
|
});
|
1023
config.d.ts
vendored
Normal file
1023
config.d.ts
vendored
Normal file
File diff suppressed because it is too large
Load Diff
15
configure
vendored
Executable file
15
configure
vendored
Executable file
@ -0,0 +1,15 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# This file is intended to be run on unix systems to configure librdkafka
|
||||||
|
# inside the submodules
|
||||||
|
|
||||||
|
# This does not get run on windows which uses the build in solutions file
|
||||||
|
|
||||||
|
# Get script directory
|
||||||
|
scriptdir=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
|
||||||
|
|
||||||
|
pushd ./deps/librdkafka &> /dev/null
|
||||||
|
|
||||||
|
./configure --prefix="${scriptdir}/build/deps" --libdir="${scriptdir}/build/deps" $*
|
||||||
|
|
||||||
|
popd &> /dev/null
|
6325
cpplint.py
vendored
Normal file
6325
cpplint.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
BIN
deploy.enc
Normal file
BIN
deploy.enc
Normal file
Binary file not shown.
62
deps/librdkafka.gyp
vendored
Normal file
62
deps/librdkafka.gyp
vendored
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
{
|
||||||
|
'targets': [
|
||||||
|
{
|
||||||
|
"target_name": "librdkafka",
|
||||||
|
"type": "none",
|
||||||
|
"conditions": [
|
||||||
|
[
|
||||||
|
'OS=="win"',
|
||||||
|
{
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"actions": [
|
||||||
|
{
|
||||||
|
"action_name": "configure",
|
||||||
|
"inputs": [],
|
||||||
|
"outputs": [
|
||||||
|
"librdkafka/config.h",
|
||||||
|
],
|
||||||
|
"action": [
|
||||||
|
"node", "../util/configure"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action_name": "build_dependencies",
|
||||||
|
"inputs": [
|
||||||
|
"librdkafka/config.h",
|
||||||
|
],
|
||||||
|
"action": [
|
||||||
|
"make", "-C", "librdkafka", "libs", "install"
|
||||||
|
],
|
||||||
|
"conditions": [
|
||||||
|
[
|
||||||
|
'OS=="mac"',
|
||||||
|
{
|
||||||
|
'outputs': [
|
||||||
|
'deps/librdkafka/src-cpp/librdkafka++.dylib',
|
||||||
|
'deps/librdkafka/src-cpp/librdkafka++.1.dylib',
|
||||||
|
'deps/librdkafka/src/librdkafka.dylib',
|
||||||
|
'deps/librdkafka/src/librdkafka.1.dylib'
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'outputs': [
|
||||||
|
'deps/librdkafka/src-cpp/librdkafka++.so',
|
||||||
|
'deps/librdkafka/src-cpp/librdkafka++.so.1',
|
||||||
|
'deps/librdkafka/src/librdkafka.so',
|
||||||
|
'deps/librdkafka/src/librdkafka.so.1',
|
||||||
|
'deps/librdkafka/src-cpp/librdkafka++.a',
|
||||||
|
'deps/librdkafka/src/librdkafka.a',
|
||||||
|
],
|
||||||
|
}
|
||||||
|
]
|
||||||
|
],
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
75
deps/windows-install.py
vendored
Normal file
75
deps/windows-install.py
vendored
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
librdkafkaVersion = ''
|
||||||
|
# read librdkafka version from package.json
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
with open('../package.json') as f:
|
||||||
|
librdkafkaVersion = json.load(f)['librdkafka']
|
||||||
|
librdkafkaWinSufix = '7' if librdkafkaVersion == '0.11.5' else '';
|
||||||
|
|
||||||
|
depsPrecompiledDir = '../deps/precompiled'
|
||||||
|
depsIncludeDir = '../deps/include'
|
||||||
|
buildReleaseDir = 'Release'
|
||||||
|
|
||||||
|
# alternative: 'https://api.nuget.org/v3-flatcontainer/librdkafka.redist/{}/librdkafka.redist.{}.nupkg'.format(librdkafkaVersion, librdkafkaVersion)
|
||||||
|
env_dist = os.environ
|
||||||
|
downloadBaseUrl = env_dist['NODE_RDKAFKA_NUGET_BASE_URL'] if 'NODE_RDKAFKA_NUGET_BASE_URL' in env_dist else 'https://globalcdn.nuget.org/packages/'
|
||||||
|
librdkafkaNugetUrl = downloadBaseUrl + 'librdkafka.redist.{}.nupkg'.format(librdkafkaVersion)
|
||||||
|
print('download librdkafka form ' + librdkafkaNugetUrl)
|
||||||
|
outputDir = 'librdkafka.redist'
|
||||||
|
outputFile = outputDir + '.zip'
|
||||||
|
dllPath = outputDir + '/runtimes/win{}-x64/native'.format(librdkafkaWinSufix)
|
||||||
|
libPath = outputDir + '/build/native/lib/win{}/x64/win{}-x64-Release/v120'.format(librdkafkaWinSufix, librdkafkaWinSufix)
|
||||||
|
includePath = outputDir + '/build/native/include/librdkafka'
|
||||||
|
|
||||||
|
# download librdkafka from nuget
|
||||||
|
try:
|
||||||
|
# For Python 3.0 and later
|
||||||
|
from urllib.request import urlopen
|
||||||
|
except ImportError:
|
||||||
|
# Fall back to Python 2's urllib2
|
||||||
|
from urllib2 import urlopen
|
||||||
|
import ssl
|
||||||
|
|
||||||
|
filedata = urlopen(librdkafkaNugetUrl, context=ssl._create_unverified_context())
|
||||||
|
|
||||||
|
datatowrite = filedata.read()
|
||||||
|
with open(outputFile, 'wb') as f:
|
||||||
|
f.write(datatowrite)
|
||||||
|
|
||||||
|
# extract package
|
||||||
|
import zipfile
|
||||||
|
zip_ref = zipfile.ZipFile(outputFile, 'r')
|
||||||
|
zip_ref.extractall(outputDir)
|
||||||
|
zip_ref.close()
|
||||||
|
|
||||||
|
# copy files
|
||||||
|
import shutil, os, errno
|
||||||
|
|
||||||
|
def createdir(dir):
|
||||||
|
try:
|
||||||
|
os.makedirs(dir)
|
||||||
|
except OSError as e:
|
||||||
|
if errno.EEXIST != e.errno:
|
||||||
|
raise
|
||||||
|
|
||||||
|
createdir(depsPrecompiledDir)
|
||||||
|
createdir(depsIncludeDir)
|
||||||
|
createdir(buildReleaseDir)
|
||||||
|
|
||||||
|
shutil.copy2(libPath + '/librdkafka.lib', depsPrecompiledDir)
|
||||||
|
shutil.copy2(libPath + '/librdkafkacpp.lib', depsPrecompiledDir)
|
||||||
|
|
||||||
|
shutil.copy2(includePath + '/rdkafka.h', depsIncludeDir)
|
||||||
|
shutil.copy2(includePath + '/rdkafkacpp.h', depsIncludeDir)
|
||||||
|
|
||||||
|
shutil.copy2(dllPath + '/zlib.dll', buildReleaseDir)
|
||||||
|
shutil.copy2(dllPath + '/msvcr120.dll', buildReleaseDir)
|
||||||
|
shutil.copy2(dllPath + '/librdkafka.dll', buildReleaseDir)
|
||||||
|
shutil.copy2(dllPath + '/librdkafkacpp.dll', buildReleaseDir)
|
||||||
|
if not librdkafkaVersion.startswith('0.'):
|
||||||
|
shutil.copy2(dllPath + '/libzstd.dll', buildReleaseDir)
|
||||||
|
shutil.copy2(dllPath + '/msvcp120.dll', buildReleaseDir)
|
||||||
|
|
||||||
|
# clean up
|
||||||
|
os.remove(outputFile)
|
||||||
|
shutil.rmtree(outputDir)
|
23
docker-compose.yml
Normal file
23
docker-compose.yml
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
zookeeper:
|
||||||
|
image: confluentinc/cp-zookeeper
|
||||||
|
ports:
|
||||||
|
- "2181:2181"
|
||||||
|
environment:
|
||||||
|
ZOOKEEPER_CLIENT_PORT: 2181
|
||||||
|
ZOOKEEPER_TICK_TIME: 2000
|
||||||
|
kafka:
|
||||||
|
image: confluentinc/cp-kafka
|
||||||
|
links:
|
||||||
|
- zookeeper
|
||||||
|
ports:
|
||||||
|
- "9092:9092"
|
||||||
|
environment:
|
||||||
|
KAFKA_BROKER_ID: 1
|
||||||
|
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||||
|
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092'
|
||||||
|
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||||
|
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||||
|
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
|
||||||
|
KAFKA_DEFAULT_REPLICATION_FACTOR: 1
|
||||||
|
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
|
193
e2e/admin.spec.js
Normal file
193
e2e/admin.spec.js
Normal file
@ -0,0 +1,193 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
var eventListener = require('./listener');
|
||||||
|
var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
|
||||||
|
var time = Date.now();
|
||||||
|
|
||||||
|
/**
 * Poll broker metadata until a topic becomes visible (or give up).
 *
 * @param {Object} client - any client exposing getMetadata(opts, cb).
 * @param {string} topicName - topic to wait for.
 * @param {number} maxTries - maximum number of metadata attempts.
 * @param {number} tryDelay - delay in ms before each attempt.
 * @param {Function} cb - node-style callback(err, topicObj).
 * @param {Function} [customCondition] - optional extra predicate run on the
 *   matching topic object; the poll only succeeds once it returns truthy.
 */
function pollForTopic(client, topicName, maxTries, tryDelay, cb, customCondition) {
  var attempt = 0;

  // One metadata round-trip: hand the matching topic to onResult, or an
  // Error when it is not (yet) present.
  function fetchTopic(onResult) {
    client.getMetadata({ topic: topicName }, function(metadataErr, metadata) {
      if (metadataErr) {
        // NOTE: a metadata failure aborts the whole poll immediately
        // (it is reported to the outer callback, not retried).
        cb(metadataErr);
        return;
      }

      var matches = metadata.topics.filter(function(candidate) {
        if (candidate.name !== topicName) {
          return false;
        }
        // Apply the optional extra predicate only once we are looking at
        // the correct topic.
        return customCondition ? customCondition(candidate) : true;
      });

      if (matches.length >= 1) {
        onResult(null, matches[0]);
      } else {
        onResult(new Error('Could not find topic ' + topicName));
      }
    });
  }

  // Schedule the next attempt, or fail once the budget is exhausted.
  function scheduleAttempt() {
    attempt += 1;
    if (attempt >= maxTries) {
      cb(new Error('Exceeded max tries of ' + maxTries));
      return;
    }

    setTimeout(function() {
      fetchTopic(function(err, topicObj) {
        if (err) {
          scheduleAttempt();
        } else {
          cb(null, topicObj);
        }
      });
    }, tryDelay);
  }

  scheduleAttempt();
}
|
||||||
|
|
||||||
|
// E2E suite for the AdminClient: topic creation/deletion and partition
// management against a live broker (KAFKA_HOST). A shared Producer is used
// only to poll metadata and observe the admin operations taking effect.
describe('Admin', function() {
  var client;
  var producer;

  // One producer for the whole suite; used by pollForTopic for metadata.
  before(function(done) {
    producer = new Kafka.Producer({
      'metadata.broker.list': kafkaBrokerList,
    });
    producer.connect(null, function(err) {
      t.ifError(err);
      done();
    });
  });

  after(function(done) {
    producer.disconnect(function() {
      done();
    });
  });

  // Fresh AdminClient per test; AdminClient.create connects synchronously
  // (no connect callback is awaited here).
  beforeEach(function() {
    this.timeout(10000);
    client = Kafka.AdminClient.create({
      'client.id': 'kafka-test',
      'metadata.broker.list': kafkaBrokerList
    });
  });

  describe('createTopic', function() {
    it('should create topic sucessfully', function(done) {
      var topicName = 'admin-test-topic-' + time;
      this.timeout(30000);
      client.createTopic({
        topic: topicName,
        num_partitions: 1,
        replication_factor: 1
      }, function(err) {
        // NOTE(review): the createTopic err is shadowed by the poll err and
        // never asserted — a createTopic failure only surfaces indirectly
        // when the topic fails to appear in metadata.
        pollForTopic(producer, topicName, 10, 1000, function(err) {
          t.ifError(err);
          done();
        });
      });
    });

    it('should raise an error when replication_factor is larger than number of brokers', function(done) {
      var topicName = 'admin-test-topic-bad-' + time;
      this.timeout(30000);
      // Absurd partition/replication counts must be rejected by the broker.
      client.createTopic({
        topic: topicName,
        num_partitions: 9999,
        replication_factor: 9999
      }, function(err) {
        t.equal(typeof err, 'object', 'an error should be returned');
        done();
      });
    });
  });

  describe('deleteTopic', function() {
    it('should be able to delete a topic after creation', function(done) {
      var topicName = 'admin-test-topic-2bdeleted-' + time;
      this.timeout(30000);
      // Create, wait until visible, then delete.
      client.createTopic({
        topic: topicName,
        num_partitions: 1,
        replication_factor: 1
      }, function(err) {
        pollForTopic(producer, topicName, 10, 1000, function(err) {
          t.ifError(err);
          client.deleteTopic(topicName, function(deleteErr) {
            // Fail if we got an error
            t.ifError(deleteErr);
            done();
          });
        });
      });
    });
  });

  describe('createPartitions', function() {
    it('should be able to add partitions to a topic after creation', function(done) {
      var topicName = 'admin-test-topic-newparts-' + time;
      this.timeout(30000);
      client.createTopic({
        topic: topicName,
        num_partitions: 1,
        replication_factor: 1
      }, function(err) {
        pollForTopic(producer, topicName, 10, 1000, function(err) {
          t.ifError(err);
          // NOTE(review): createErr is never asserted; success is only
          // verified via the custom metadata condition below.
          client.createPartitions(topicName, 20, function(createErr) {
            // Poll until metadata reflects the new partition count.
            pollForTopic(producer, topicName, 10, 1000, function(pollErr) {
              t.ifError(pollErr);
              done();
            }, function(topic) {
              return topic.partitions.length === 20;
            });
          });
        });
      });
    });

    it('should NOT be able to reduce partitions to a topic after creation', function(done) {
      var topicName = 'admin-test-topic-newparts2-' + time;
      this.timeout(30000);
      client.createTopic({
        topic: topicName,
        num_partitions: 4,
        replication_factor: 1
      }, function(err) {
        pollForTopic(producer, topicName, 10, 1000, function(err) {
          t.ifError(err);
          // Kafka never supports shrinking partition counts.
          client.createPartitions(topicName, 1, function(createErr) {
            t.equal(typeof createErr, 'object', 'an error should be returned');
            done();
          });
        });
      });
    });
  });

});
|
690
e2e/both.spec.js
Normal file
690
e2e/both.spec.js
Normal file
@ -0,0 +1,690 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var crypto = require('crypto');
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
|
||||||
|
var eventListener = require('./listener');
|
||||||
|
var topic = 'test';
|
||||||
|
var topic2 = 'test2';
|
||||||
|
|
||||||
|
describe('Consumer/Producer', function() {
|
||||||
|
|
||||||
|
var producer;
|
||||||
|
var consumer;
|
||||||
|
|
||||||
|
  // Connect a fresh consumer (random group id) and producer before every
  // test; the hook completes only once BOTH clients are connected.
  beforeEach(function(done) {
    var finished = 0;
    var called = false;

    // Rendezvous for the two connect callbacks: the first error reports
    // once and short-circuits; otherwise done() fires on the second success.
    function maybeDone(err) {
      if (called) {
        return;
      }

      finished++;
      if (err) {
        called = true;
        return done(err);
      }

      if (finished === 2) {
        done();
      }
    }

    // Random group id so committed offsets never leak between test runs.
    var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex');

    consumer = new Kafka.KafkaConsumer({
      'metadata.broker.list': kafkaBrokerList,
      'group.id': grp,
      'fetch.wait.max.ms': 1000,
      'session.timeout.ms': 10000,
      'enable.auto.commit': true,
      'enable.partition.eof': true,
      'debug': 'all'
      // paused: true,
    }, {
      'auto.offset.reset': 'largest'
    });

    consumer.connect({}, function(err, d) {
      t.ifError(err);
      t.equal(typeof d, 'object', 'metadata should be returned');
      maybeDone(err);
    });

    eventListener(consumer);

    producer = new Kafka.Producer({
      'client.id': 'kafka-mocha',
      'metadata.broker.list': kafkaBrokerList,
      'fetch.wait.max.ms': 1,
      'debug': 'all',
      'dr_cb': true
    }, {
      'produce.offset.report': true
    });

    producer.connect({}, function(err, d) {
      t.ifError(err);
      t.equal(typeof d, 'object', 'metadata should be returned');
      maybeDone(err);
    });

    eventListener(producer);

  });
|
||||||
|
|
||||||
|
  // Disconnect both clients after every test, completing once both
  // disconnect callbacks have run (first error reported wins).
  afterEach(function(done) {
    this.timeout(6000);
    var finished = 0;
    var called = false;

    // Same two-way rendezvous pattern as in beforeEach.
    function maybeDone(err) {
      if (called) {
        return;
      }

      finished++;
      if (err) {
        called = true;
        return done(err);
      }

      if (finished === 2) {
        done();
      }
    }

    consumer.disconnect(function(err) {
      maybeDone(err);
    });

    producer.disconnect(function(err) {
      maybeDone(err);
    });
  });
|
||||||
|
|
||||||
|
  // Round-trip a random 4KB payload with consume(1), retrying until the
  // message lands, then verify assignments and consumer.position().
  it('should be able to produce, consume messages, read position: subscribe/consumeOnce', function(done) {
    this.timeout(8000);
    crypto.randomBytes(4096, function(ex, buffer) {
      producer.setPollInterval(10);

      var offset;

      producer.once('delivery-report', function(err, report) {
        t.ifError(err);
        offset = report.offset;
      });

      consumer.setDefaultConsumeTimeout(10);
      consumer.subscribe([topic]);

      var ct;

      var consumeOne = function() {
        consumer.consume(1, function(err, messages) {
          // -185: presumably librdkafka ERR__TIMED_OUT_QUEUE — just retry;
          // -191: presumably ERR__PARTITION_EOF — produce and retry.
          // TODO(review): confirm codes against Kafka.CODES.ERRORS.
          if (err && err.code === -185) {
            ct = setTimeout(consumeOne, 100);
            return;
          } else if (messages.length === 0 || (err && err.code === -191)) {
            producer.produce(topic, null, buffer, null);
            ct = setTimeout(consumeOne, 100);
            return;
          } else if (err) {
            // NOTE(review): other errors end the retry loop silently; the
            // test then fails only via the mocha timeout.
            return;
          }

          var message = messages[0];

          t.equal(Array.isArray(consumer.assignments()), true, 'Assignments should be an array');
          t.equal(consumer.assignments().length > 0, true, 'Should have at least one assignment');
          t.equal(buffer.toString(), message.value.toString(),
            'message is not equal to buffer');

          // test consumer.position as we have consumed
          var position = consumer.position();
          t.equal(position.length, 1);
          t.deepStrictEqual(position[0].partition, 0);
          t.ok(position[0].offset >= 0);
          done();
        });
      };

      // Consume until we get it or time out
      consumeOne();

    });
  });
|
||||||
|
|
||||||
|
  // First consume with no traffic must block for the full timeout and
  // return nothing; then, with one pending message, a large-timeout consume
  // must return promptly at partition EOF with exactly that message.
  it('should return ready messages on partition EOF', function(done) {
    this.timeout(8000);
    crypto.randomBytes(4096, function(ex, buffer) {
      producer.setPollInterval(10);

      producer.once('delivery-report', function(err, report) {
        t.ifError(err);
      });

      consumer.subscribe([topic]);

      var consumeAll = function() {
        // Make sure we get the message fast when consuming with large timeout
        consumer.setDefaultConsumeTimeout(1000000);
        consumer.consume(100000, function(err, messages) {
          t.ifError(err);
          t.equal(messages.length, 1);
          done();
        });
      };

      var consumeNone = function() {
        // With no new messages, the consume should wait whole timeout
        var start = Date.now();
        // Set the timeout to 2000ms to see that it actually waits the whole time
        // (Needs to be higher than fetch.max.wait.ms which is 1000 here
        // to ensure we don't only wait that long)
        consumer.setDefaultConsumeTimeout(2000);
        consumer.consume(100000, function(err, messages) {
          t.ifError(err);
          t.ok(Date.now() - start >= 1998);
          t.equal(messages.length, 0);

          // Produce one message to cause EOF with waiting message when consuming all
          producer.produce(topic, null, buffer, null);
          consumeAll();
        });
      };

      consumeNone();
    });
  });
|
||||||
|
|
||||||
|
  // Produce one message mid-consume and assert the event order seen by the
  // consumer is data first, then partition.eof.
  it('should emit partition.eof event when reaching end of partition', function(done) {
    this.timeout(8000);
    crypto.randomBytes(4096, function(ex, buffer) {
      producer.setPollInterval(10);

      producer.once('delivery-report', function(err, report) {
        t.ifError(err);
      });

      consumer.subscribe([topic]);

      var events = [];

      consumer.once('data', function(msg) {
        events.push("data");
      });

      consumer.once('partition.eof', function(eof) {
        events.push("partition.eof");
      });

      // Delay the produce so the consume below is already in flight.
      setTimeout(function() {
        producer.produce(topic, null, buffer, null);
      }, 500)
      consumer.setDefaultConsumeTimeout(2000);
      consumer.consume(1000, function(err, messages) {
        t.ifError(err);
        t.equal(messages.length, 1);
        t.deepStrictEqual(events, ["data", "partition.eof"]);
        done();
      });
    });
  });
|
||||||
|
|
||||||
|
  // Start consuming at the end of an (effectively) empty partition: an EOF
  // fires immediately, then data, then a second EOF after the late produce.
  it('should emit partition.eof when already at end of partition', function(done) {
    this.timeout(8000);
    crypto.randomBytes(4096, function(ex, buffer) {
      producer.setPollInterval(10);

      producer.once('delivery-report', function(err, report) {
        t.ifError(err);
      });

      consumer.subscribe([topic]);

      var events = [];

      consumer.once('data', function(msg) {
        events.push("data");
      });

      // 'on' (not 'once'): two EOF events are expected here.
      consumer.on('partition.eof', function(eof) {
        events.push("partition.eof");
      });

      setTimeout(function() {
        producer.produce(topic, null, buffer, null);
      }, 2000)
      consumer.setDefaultConsumeTimeout(3000);
      consumer.consume(1000, function(err, messages) {
        t.ifError(err);
        t.equal(messages.length, 1);
        t.deepStrictEqual(events, ["partition.eof", "data", "partition.eof"]);
        done();
      });
    });
  });
|
||||||
|
|
||||||
|
  // Streaming-mode round trip: consume() with no count starts the consume
  // loop; verify the delivery report and the received message fields.
  it('should be able to produce and consume messages: consumeLoop', function(done) {
    var key = 'key';

    this.timeout(5000);

    crypto.randomBytes(4096, function(ex, buffer) {

      producer.setPollInterval(10);

      producer.once('delivery-report', function(err, report) {
        // Report fields are only checked on success; a delivery error is
        // not itself asserted here.
        if (!err) {
          t.equal(topic, report.topic, 'invalid delivery-report topic');
          t.equal(key, report.key, 'invalid delivery-report key');
          t.ok(report.offset >= 0, 'invalid delivery-report offset');
        }
      });

      consumer.on('data', function(message) {
        t.equal(buffer.toString(), message.value.toString(), 'invalid message value');
        t.equal(key, message.key, 'invalid message key');
        t.equal(topic, message.topic, 'invalid message topic');
        t.ok(message.offset >= 0, 'invalid message offset');
        done();
      });

      consumer.subscribe([topic]);
      consumer.consume();

      // Give the consumer time to join before producing.
      setTimeout(function() {
        producer.produce(topic, null, buffer, key);
      }, 2000);

    });
  });
|
||||||
|
|
||||||
|
  // In streaming mode, two spaced-out produces must interleave data and
  // partition.eof events in a fixed order, with matching offsets.
  it('should emit \'partition.eof\' events in consumeLoop', function(done) {
    this.timeout(7000);

    crypto.randomBytes(4096, function(ex, buffer) {
      producer.setPollInterval(10);

      producer.once('delivery-report', function(err, report) {
        t.ifError(err);
      });


      var events = [];
      var offsets = [];

      consumer.on('data', function(message) {
        t.equal(message.topic, topic);
        t.equal(message.partition, 0);
        offsets.push(message.offset);
        events.push('data');
      });

      consumer.on('partition.eof', function(eofEvent) {
        t.equal(eofEvent.topic, topic);
        t.equal(eofEvent.partition, 0);
        offsets.push(eofEvent.offset);
        events.push('partition.eof');
      });

      consumer.subscribe([topic]);
      consumer.consume();

      setTimeout(function() {
        producer.produce(topic, null, buffer);
      }, 2000);

      setTimeout(function() {
        producer.produce(topic, null, buffer);
      }, 4000);

      // After both produces: EOF (empty start), data+EOF, data+EOF.
      setTimeout(function() {
        t.deepStrictEqual(events, ['partition.eof', 'data', 'partition.eof', 'data', 'partition.eof']);
        var startOffset = offsets[0];
        // EOF offset equals the offset of the next expected message.
        t.deepStrictEqual(offsets,
          [ startOffset,
            startOffset,
            startOffset + 1,
            startOffset + 1,
            startOffset + 2 ]);
        done();
      }, 6000);
    });
  });
|
||||||
|
|
||||||
|
  // Subscribing to a topic that does not exist must surface a 'warning'
  // event carrying ERR_UNKNOWN_TOPIC_OR_PART.
  it('should emit [warning] event on UNKNOWN_TOPIC_OR_PART error: consumeLoop', function(done) {
    consumer.on('warning', function (err) {
      if (err.code === Kafka.CODES.ERRORS.ERR_UNKNOWN_TOPIC_OR_PART) {
        // Disconnect here so the shared afterEach does not double-handle it.
        consumer.disconnect(function() {
          done();
        });
      } else {
        // Any other warning is treated as a failure.
        t.ifError(err);
      }
    });

    consumer.subscribe(['non_existing_topic']);
    consumer.consume();
  });
|
||||||
|
|
||||||
|
  // Header round-trip matrix: each case delegates to run_headers_test with
  // a different header value type (string, Buffer, int, float, mixed).
  it('should be able to produce and consume messages with one header value as string: consumeLoop', function(done) {
    var headers = [
      { key: "value" }
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });

  it('should be able to produce and consume messages with one header value as buffer: consumeLoop', function(done) {
    var headers = [
      { key: Buffer.from('value') }
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });

  it('should be able to produce and consume messages with one header value as int: consumeLoop', function(done) {
    var headers = [
      { key: 10 }
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });

  it('should be able to produce and consume messages with one header value as float: consumeLoop', function(done) {
    var headers = [
      { key: 1.11 }
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });

  it('should be able to produce and consume messages with multiple headers value as buffer: consumeLoop', function(done) {
    var headers = [
      { key1: Buffer.from('value1') },
      { key2: Buffer.from('value2') },
      { key3: Buffer.from('value3') },
      { key4: Buffer.from('value4') },
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });

  it('should be able to produce and consume messages with multiple headers value as string: consumeLoop', function(done) {
    var headers = [
      { key1: 'value1' },
      { key2: 'value2' },
      { key3: 'value3' },
      { key4: 'value4' },
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });

  it('should be able to produce and consume messages with multiple headers with mixed values: consumeLoop', function(done) {
    var headers = [
      { key1: 'value1' },
      { key2: Buffer.from('value2') },
      { key3: 100 },
      { key4: 10.1 },
    ];
    this.timeout(5000);
    run_headers_test(done, headers);
  });
|
||||||
|
|
||||||
|
  // Degenerate key/value cases: empty Buffer key, empty string key, and
  // null key + null value must all round-trip faithfully.
  it('should be able to produce and consume messages: empty buffer key and empty value', function(done) {
    this.timeout(20000);
    var emptyString = '';
    var key = Buffer.from(emptyString);
    var value = Buffer.from('');

    producer.setPollInterval(10);

    consumer.once('data', function(message) {
      t.notEqual(message.value, null, 'message should not be null');
      t.equal(value.toString(), message.value.toString(), 'invalid message value');
      // An empty-Buffer key is delivered back as the empty string.
      t.equal(emptyString, message.key, 'invalid message key');
      done();
    });

    consumer.subscribe([topic]);
    consumer.consume();

    setTimeout(function() {
      producer.produce(topic, null, value, key);
    }, 2000);
  });

  it('should be able to produce and consume messages: empty key and empty value', function(done) {
    this.timeout(20000);
    var key = '';
    var value = Buffer.from('');

    producer.setPollInterval(10);

    consumer.once('data', function(message) {
      t.notEqual(message.value, null, 'message should not be null');
      t.equal(value.toString(), message.value.toString(), 'invalid message value');
      t.equal(key, message.key, 'invalid message key');
      done();
    });

    consumer.subscribe([topic]);
    consumer.consume();

    setTimeout(function() {
      producer.produce(topic, null, value, key);
    }, 2000);
  });

  it('should be able to produce and consume messages: null key and null value', function(done) {
    this.timeout(20000);
    var key = null;
    var value = null;

    producer.setPollInterval(10);

    consumer.once('data', function(message) {
      // Null value and key must come back as null (tombstone semantics).
      t.equal(value, message.value, 'invalid message value');
      t.equal(key, message.key, 'invalid message key');
      done();
    });

    consumer.subscribe([topic]);
    consumer.consume();

    setTimeout(function() {
      producer.produce(topic, null, value, key);
    }, 2000);
  });
|
||||||
|
|
||||||
|
  // With 'offset_commit_cb': true the consumer emits 'offset.commit'
  // events. This suite replaces the shared consumer with one configured
  // that way (the shared afterEach in the outer suite still disconnects it;
  // ordering here is intricate — do not restructure casually).
  describe('Exceptional case - offset_commit_cb true', function() {
    var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex');
    var consumerOpts = {
      'metadata.broker.list': kafkaBrokerList,
      'group.id': grp,
      'fetch.wait.max.ms': 1000,
      'session.timeout.ms': 10000,
      'enable.auto.commit': false,
      'debug': 'all',
      'offset_commit_cb': true
    };

    beforeEach(function(done) {
      consumer = new Kafka.KafkaConsumer(consumerOpts, {
        'auto.offset.reset': 'largest',
      });

      consumer.connect({}, function(err, d) {
        t.ifError(err);
        t.equal(typeof d, 'object', 'metadata should be returned');
        done();
      });

      eventListener(consumer);
    });

    afterEach(function(done) {
      this.timeout(10000);
      consumer.disconnect(function() {
        done();
      });
    });

    // Consume one message, commit it, then — inside the commit callback —
    // disconnect, reconnect, and verify the committed offset prevents the
    // same message from being redelivered within a 5s window.
    it('should async commit after consuming', function(done) {
      this.timeout(25000);
      var key = '';
      var value = Buffer.from('');

      var lastOffset = null;

      consumer.once('data', function(message) {
        lastOffset = message.offset;

        // disconnect in offset commit callback
        consumer.on('offset.commit', function(err, offsets) {
          t.ifError(err);
          t.equal(typeof offsets, 'object', 'offsets should be returned');

          consumer.disconnect(function() {
            // reconnect in disconnect callback
            consumer.connect({}, function(err, d) {
              t.ifError(err);
              t.equal(typeof d, 'object', 'metadata should be returned');

              // check that no new messages arrive, as the offset was committed
              consumer.once('data', function(message) {
                done(new Error('Should never be here'));
              });

              consumer.subscribe([topic]);
              consumer.consume();

              setTimeout(function() {
                done();
              }, 5000);
            });
          });
        });

        consumer.commitMessage(message);
      });

      consumer.subscribe([topic]);
      consumer.consume();

      setTimeout(function() {
        producer.produce(topic, null, value, key);
      }, 2000);
    });
  });
|
||||||
|
|
||||||
|
  // With 'offset_commit_cb' set to a function, the function itself must be
  // invoked after a commit — the test passes by calling done() from it.
  describe('Exceptional case - offset_commit_cb function', function() {
    var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex');

    afterEach(function(done) {
      this.timeout(10000);
      consumer.disconnect(function() {
        done();
      });
    });

    it('should callback offset_commit_cb after commit', function(done) {
      this.timeout(20000);

      var consumerOpts = {
        'metadata.broker.list': kafkaBrokerList,
        'group.id': grp,
        'fetch.wait.max.ms': 1000,
        'session.timeout.ms': 10000,
        'enable.auto.commit': false,
        'debug': 'all',
        // Test success is signalled here, not via any assertion below.
        'offset_commit_cb': function(offset) {
          done();
        }
      };
      consumer = new Kafka.KafkaConsumer(consumerOpts, {
        'auto.offset.reset': 'largest',
      });
      eventListener(consumer);

      consumer.connect({}, function(err, d) {
        t.ifError(err);
        t.equal(typeof d, 'object', 'metadata should be returned');
        consumer.subscribe([topic]);
        consumer.consume();
        setTimeout(function() {
          producer.produce(topic, null, Buffer.from(''), '');
        }, 2000);
      });

      // Committing the first message triggers offset_commit_cb above.
      consumer.once('data', function(message) {
        consumer.commitMessage(message);
      });
    });
  });
|
||||||
|
|
||||||
|
/**
 * Assert that received message headers match the expected header list.
 *
 * Each entry is a single-key object; Buffer values (on either side) are
 * compared via their string form, and comparison uses assert.equal's loose
 * equality so numeric header values match their string representation.
 *
 * @param {Array<Object>} expectedHeaders - headers passed to produce().
 * @param {Array<Object>} messageHeaders - headers read off the message.
 */
function assert_headers_match(expectedHeaders, messageHeaders) {
  // Entry counts must agree before any per-entry comparison.
  t.equal(expectedHeaders.length, messageHeaders.length, 'Headers length does not match expected length');

  expectedHeaders.forEach(function(expected, idx) {
    var wantKey = Object.keys(expected)[0];
    var gotKeys = Object.keys(messageHeaders[idx]);

    t.equal(gotKeys.length, 1, 'Expected only one Header key');
    t.equal(wantKey, gotKeys[0], 'Expected key does not match message key');

    var rawExpected = expected[wantKey];
    var wantValue = Buffer.isBuffer(rawExpected) ? rawExpected.toString() : rawExpected;
    var gotValue = messageHeaders[idx][wantKey].toString();

    t.equal(wantValue, gotValue, 'invalid message header');
  });
}
|
||||||
|
|
||||||
|
  // Shared driver for the header-matrix tests: produce one random 4KB
  // message carrying `headers`, consume it in streaming mode, and verify
  // every message field plus the headers via assert_headers_match.
  function run_headers_test(done, headers) {
    var key = 'key';

    crypto.randomBytes(4096, function(ex, buffer) {

      producer.setPollInterval(10);

      producer.once('delivery-report', function(err, report) {
        // Report fields only checked on success; delivery errors are not
        // asserted here (the test would then fail on the mocha timeout).
        if (!err) {
          t.equal(topic, report.topic, 'invalid delivery-report topic');
          t.equal(key, report.key, 'invalid delivery-report key');
          t.ok(report.offset >= 0, 'invalid delivery-report offset');
        }
      });

      consumer.on('data', function(message) {
        t.equal(buffer.toString(), message.value.toString(), 'invalid message value');
        t.equal(key, message.key, 'invalid message key');
        t.equal(topic, message.topic, 'invalid message topic');
        t.ok(message.offset >= 0, 'invalid message offset');
        assert_headers_match(headers, message.headers);
        done();
      });

      consumer.subscribe([topic]);
      consumer.consume();

      // Delay the produce so the consumer has joined the group.
      setTimeout(function() {
        var timestamp = new Date().getTime();
        producer.produce(topic, null, buffer, key, timestamp, "", headers);
      }, 2000);

    });
  }
|
||||||
|
|
||||||
|
});
|
350
e2e/consumer.spec.js
Normal file
350
e2e/consumer.spec.js
Normal file
@ -0,0 +1,350 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var t = require('assert');
|
||||||
|
var crypto = require('crypto');
|
||||||
|
|
||||||
|
var eventListener = require('./listener');
|
||||||
|
|
||||||
|
var KafkaConsumer = require('../').KafkaConsumer;
|
||||||
|
|
||||||
|
var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
|
||||||
|
var topic = 'test';
|
||||||
|
|
||||||
|
describe('Consumer', function() {
|
||||||
|
var gcfg;
|
||||||
|
|
||||||
|
  // Rebuild the shared consumer config before every test with a fresh
  // random group id, so committed offsets never leak between cases.
  beforeEach(function() {
    var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex');
    gcfg = {
      'bootstrap.servers': kafkaBrokerList,
      'group.id': grp,
      'debug': 'all',
      'rebalance_cb': true,
      'enable.auto.commit': false
    };
  });
|
||||||
|
|
||||||
|
  // commit() accepts either a single TopicPartition object or an array of
  // them; both forms must be accepted without throwing.
  describe('commit', function() {
    var consumer;
    beforeEach(function(done) {
      consumer = new KafkaConsumer(gcfg, {});

      consumer.connect({ timeout: 2000 }, function(err, info) {
        t.ifError(err);
        done();
      });

      eventListener(consumer);
    });

    it('should allow commit with an array', function(done) {
      // offset -1 — presumably the "invalid/unset" sentinel; verify against
      // librdkafka's RD_KAFKA_OFFSET_INVALID semantics.
      consumer.commit([{ topic: topic, partition: 0, offset: -1 }]);
      done();
    });

    it('should allow commit without an array', function(done) {
      consumer.commit({ topic: topic, partition: 0, offset: -1 });
      done();
    });

    afterEach(function(done) {
      consumer.disconnect(function() {
        done();
      });
    });
  });
|
||||||
|
|
||||||
|
describe('committed and position', function() {
|
||||||
|
var consumer;
|
||||||
|
    // Connect a fresh consumer before each committed/position test and
    // disconnect it afterwards.
    beforeEach(function(done) {
      consumer = new KafkaConsumer(gcfg, {});

      consumer.connect({ timeout: 2000 }, function(err, info) {
        t.ifError(err);
        done();
      });

      eventListener(consumer);
    });

    afterEach(function(done) {
      consumer.disconnect(function() {
        done();
      });
    });
|
||||||
|
|
||||||
|
    // committed()/position() behavior around assignment and commits:
    // empty before assign; TopicPartitions without offsets after assign;
    // real offsets only after an explicit commit.
    it('before assign, committed offsets are empty', function(done) {
      consumer.committed(null, 1000, function(err, committed) {
        t.ifError(err);
        t.equal(Array.isArray(committed), true, 'Committed offsets should be an array');
        t.equal(committed.length, 0);
        done();
      });
    });

    it('before assign, position returns an empty array', function() {
      var position = consumer.position();
      t.equal(Array.isArray(position), true, 'Position should be an array');
      t.equal(position.length, 0);
    });

    it('after assign, should get committed array without offsets ', function(done) {
      consumer.assign([{topic:topic, partition:0}]);
      // Defer this for a second
      setTimeout(function() {
        consumer.committed(null, 1000, function(err, committed) {
          t.ifError(err);
          t.equal(committed.length, 1);
          t.equal(typeof committed[0], 'object', 'TopicPartition should be an object');
          t.deepStrictEqual(committed[0].partition, 0);
          // Nothing committed yet, so no offset on the TopicPartition.
          t.equal(committed[0].offset, undefined);
          done();
        });
      }, 1000);
    });

    it('after assign and commit, should get committed offsets', function(done) {
      this.timeout(6000);
      consumer.assign([{topic:topic, partition:0}]);
      consumer.commitSync({topic:topic, partition:0, offset:1000});
      consumer.committed(null, 1000, function(err, committed) {
        t.ifError(err);
        t.equal(committed.length, 1);
        t.equal(typeof committed[0], 'object', 'TopicPartition should be an object');
        t.deepStrictEqual(committed[0].partition, 0);
        t.deepStrictEqual(committed[0].offset, 1000);
        done();
      });
    });

    it('after assign, before consume, position should return an array without offsets', function(done) {
      consumer.assign([{topic:topic, partition:0}]);
      var position = consumer.position();
      t.equal(Array.isArray(position), true, 'Position should be an array');
      t.equal(position.length, 1);
      t.equal(typeof position[0], 'object', 'TopicPartition should be an object');
      t.deepStrictEqual(position[0].partition, 0);
      t.equal(position[0].offset, undefined, 'before consuming, offset is undefined');
      // see both.spec.js 'should be able to produce, consume messages, read position...'
      // for checking of offset numeric value
      done();
    });
|
||||||
|
|
||||||
|
it('should obey the timeout', function(done) {
|
||||||
|
consumer.committed(null, 0, function(err, committed) {
|
||||||
|
if (!err) {
|
||||||
|
t.fail(err, 'not null', 'Error should be set for a timeout');
|
||||||
|
}
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('seek and positioning', function() {
  var consumer;

  beforeEach(function(done) {
    consumer = new KafkaConsumer(gcfg, {});

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      // Consistency fix: use the suite-wide `topic` variable instead of
      // the hard-coded string 'test', matching every other describe
      // block in this file.
      consumer.assign([{
        topic: topic,
        partition: 0,
        offset: 0
      }]);
      done();
    });

    eventListener(consumer);
  });

  afterEach(function(done) {
    consumer.disconnect(function() {
      done();
    });
  });

  it('should be able to seek', function(cb) {
    // Seek on the assigned partition with a 1ms timeout.
    consumer.seek({
      topic: topic,
      partition: 0,
      offset: 0
    }, 1, function(err) {
      t.ifError(err);
      cb();
    });
  });

  it('should be able to seek with a timeout of 0', function(cb) {
    // A 0 timeout is a valid value (librdkafka treats it as async).
    consumer.seek({
      topic: topic,
      partition: 0,
      offset: 0
    }, 0, function(err) {
      t.ifError(err);
      cb();
    });
  });
});
|
||||||
|
|
||||||
|
describe('subscribe', function() {

  var consumer;
  beforeEach(function(done) {
    consumer = new KafkaConsumer(gcfg, {});

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      done();
    });

    eventListener(consumer);
  });

  afterEach(function(done) {
    consumer.disconnect(function() {
      done();
    });
  });

  it('should be able to subscribe', function() {
    t.equal(0, consumer.subscription().length);
    consumer.subscribe([topic]);
    t.equal(1, consumer.subscription().length);
    // Consistency fix: assert against the `topic` variable that was
    // actually subscribed to, not the hard-coded string 'test' (the
    // sibling `assign` suite already asserts with `topic`).
    t.equal(topic, consumer.subscription()[0]);
    // Subscribing must not implicitly create an assignment.
    t.equal(0, consumer.assignments().length);
  });

  it('should be able to unsubscribe', function() {
    consumer.subscribe([topic]);
    t.equal(1, consumer.subscription().length);
    consumer.unsubscribe();
    t.equal(0, consumer.subscription().length);
    t.equal(0, consumer.assignments().length);
  });
});
|
||||||
|
|
||||||
|
describe('assign', function() {

  var consumer;

  beforeEach(function(done) {
    consumer = new KafkaConsumer(gcfg, {});

    eventListener(consumer);

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      done();
    });
  });

  afterEach(function(done) {
    consumer.disconnect(function() {
      done();
    });
  });

  it('should be able to take an assignment', function() {
    t.equal(0, consumer.assignments().length);
    var assignment = [{ topic: topic, partition: 0 }];
    consumer.assign(assignment);
    t.equal(1, consumer.assignments().length);
    t.equal(topic, consumer.assignments()[0].topic);
    // Assigning directly must not create a subscription.
    t.equal(0, consumer.subscription().length);
  });

  it('should be able to take an empty assignment', function() {
    consumer.assign([{ topic: topic, partition: 0 }]);
    t.equal(1, consumer.assignments().length);
    // An empty array clears the previous assignment.
    consumer.assign([]);
    t.equal(0, consumer.assignments().length);
  });
});
|
||||||
|
|
||||||
|
describe('disconnect', function() {
  var tcfg = { 'auto.offset.reset': 'earliest' };

  it('should happen gracefully', function(cb) {
    var consumer = new KafkaConsumer(gcfg, tcfg);

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      consumer.disconnect(function() {
        cb();
      });
    });
  });

  it('should happen without issue after subscribing', function(cb) {
    var consumer = new KafkaConsumer(gcfg, tcfg);

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      consumer.subscribe([topic]);
      consumer.disconnect(function() {
        cb();
      });
    });
  });

  it('should happen without issue after consuming', function(cb) {
    this.timeout(11000);

    var consumer = new KafkaConsumer(gcfg, tcfg);
    // Give the single-message consume up to 10s before returning empty.
    consumer.setDefaultConsumeTimeout(10000);

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      consumer.subscribe([topic]);
      consumer.consume(1, function(err, messages) {
        t.ifError(err);
        consumer.disconnect(function() {
          cb();
        });
      });
    });
  });

  it('should happen without issue after consuming an error', function(cb) {
    var consumer = new KafkaConsumer(gcfg, tcfg);

    // A 1ms consume timeout guarantees the timeout path is taken.
    consumer.setDefaultConsumeTimeout(1);

    consumer.connect({ timeout: 2000 }, function(err, info) {
      t.ifError(err);
      consumer.subscribe([topic]);
      consumer.consume(1, function(err, messages) {
        // Timeouts do not classify as errors anymore
        t.equal(messages[0], undefined, 'Message should not be set');
        consumer.disconnect(function() {
          cb();
        });
      });
    });
  });
});
|
||||||
|
});
|
133
e2e/groups.spec.js
Normal file
133
e2e/groups.spec.js
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var crypto = require('crypto');
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
|
||||||
|
var eventListener = require('./listener');
|
||||||
|
|
||||||
|
describe('Consumer group/Producer', function() {

  var producer;
  var consumer;
  // Fresh group id per run so offsets from prior runs cannot interfere.
  var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex');

  var config = {
    'metadata.broker.list': kafkaBrokerList,
    'group.id': grp,
    'fetch.wait.max.ms': 1000,
    'session.timeout.ms': 10000,
    'enable.auto.commit': false,
    'debug': 'all'
  };

  beforeEach(function(done) {
    producer = new Kafka.Producer({
      'client.id': 'kafka-mocha',
      'metadata.broker.list': kafkaBrokerList,
      'fetch.wait.max.ms': 1,
      'debug': 'all',
      'dr_cb': true
    });

    eventListener(producer);

    producer.connect({}, function(err, d) {
      t.ifError(err);
      t.equal(typeof d, 'object', 'metadata should be returned');
      done();
    });
  });

  beforeEach(function(done) {
    consumer = new Kafka.KafkaConsumer(config, {
      'auto.offset.reset': 'largest'
    });

    eventListener(consumer);

    consumer.connect({}, function(err, d) {
      t.ifError(err);
      t.equal(typeof d, 'object', 'metadata should be returned');
      done();
    });
  });

  afterEach(function(done) {
    // The consumer disconnects itself inside the test; only the
    // producer is torn down here.
    producer.disconnect(function() {
      done();
    });
  });

  it('should be able to commit, read committed and restart from the committed offset', function(done) {
    this.timeout(30000);
    var topic = 'test';
    var key = 'key';
    var payload = Buffer.from('value');
    var count = 0;
    var offsets = {
      'first': true
    };

    // Keep producing every 100ms until produce() throws (it does once
    // the producer has been disconnected by afterEach).
    var tt = setInterval(function() {
      try {
        producer.produce(topic, null, payload, key);
      } catch (e) {
        clearInterval(tt);
      }
    }, 100);

    consumer.on('disconnected', function() {
      // Start a second consumer in the same group; it must resume at
      // exactly the offset consumer 1 committed.
      var consumer2 = new Kafka.KafkaConsumer(config, {
        'auto.offset.reset': 'largest'
      });

      consumer2.on('data', function(message) {
        if (offsets.first) {
          offsets.first = false;
          t.deepStrictEqual(offsets.committed, message.offset, 'Offset read by consumer 2 incorrect');
          clearInterval(tt);
          consumer2.unsubscribe();
          consumer2.disconnect(function() {
            done();
          });
        }
      });

      consumer2.on('ready', function() {
        consumer2.subscribe([topic]);
        consumer2.consume();
      });
      consumer2.connect();
    });

    consumer.on('data', function(message) {
      count++;
      if (count === 3) {
        consumer.commitMessageSync(message);
        // test consumer.committed( ) API
        consumer.committed(null, 5000, function(err, topicPartitions) {
          t.ifError(err);
          t.deepStrictEqual(topicPartitions.length, 1);
          t.deepStrictEqual(topicPartitions[0].offset, message.offset + 1, 'Offset read by consumer 1 incorrect');
          offsets.committed = message.offset + 1;
          consumer.unsubscribe();
          consumer.disconnect();
        });
      }
    });

    consumer.subscribe([topic]);
    consumer.consume();
  });

});
|
51
e2e/listener.js
Normal file
51
e2e/listener.js
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
module.exports = eventListener;
|
||||||
|
|
||||||
|
function eventListener(client) {
  // Debug logging is opt-in: without DEBUG in the environment, attach
  // no listeners at all.
  if (!process.env.DEBUG) {
    return;
  }

  client
    .on('event.error', function(err) {
      console.error(err);
    })
    .on('event.log', function(event) {
      var info = {
        severity: event.severity,
        fac: event.fac,
      };
      // Every severity branch in the original chain issued this exact
      // same call, so the if/else ladder collapses to one statement.
      console.error(info, event.message);
    })
    .on('event.stats', function(event) {
      console.log(event, event.message);
    })
    .on('event.throttle', function(event) {
      console.log(event, '%s#%d throttled.', event.brokerName, event.brokerId);
      // event.throttleTime;
    })
    .on('event.event', function(event) {
      console.log(event, event.message);
    })
    .on('ready', function(info) {
      console.log('%s connected to kafka server', info.name);
    });

}
|
334
e2e/producer-transaction.spec.js
Normal file
334
e2e/producer-transaction.spec.js
Normal file
@ -0,0 +1,334 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
|
||||||
|
var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
|
||||||
|
|
||||||
|
describe('Transactional Producer', function () {
|
||||||
|
this.timeout(5000);
|
||||||
|
var TRANSACTIONS_TIMEOUT_MS = 30000;
|
||||||
|
var r = Date.now() + '_' + Math.round(Math.random() * 1000);
|
||||||
|
var topicIn = 'transaction_input_' + r;
|
||||||
|
var topicOut = 'transaction_output_' + r;
|
||||||
|
|
||||||
|
var producerTras;
|
||||||
|
var consumerTrans;
|
||||||
|
|
||||||
|
before(function (done) {
  /*
    prepare:
      transactional consumer (read from input topic)
      transactional producer (write to output topic)
      write 3 messages to input topic: A, B, C
      A will be skipped, B will be committed, C will be aborted
  */
  var connecting = 3;
  var producerInput;

  // Shared connect callback: once all three clients are up, seed the
  // input topic and retire the seed producer.
  function connectedCb(err) {
    if (err) {
      done(err);
      return;
    }
    connecting--;
    if (connecting === 0) {
      producerInput.produce(topicIn, -1, Buffer.from('A'));
      producerInput.produce(topicIn, -1, Buffer.from('B'));
      producerInput.produce(topicIn, -1, Buffer.from('C'));
      producerInput.disconnect(function (err) {
        consumerTrans.subscribe([topicIn]);
        done(err);
      });
    }
  }

  // Consistency fix: construct with `new`, as every other producer in
  // this file does (the original relied on the constructor's
  // instanceof guard to work when called without `new`).
  producerInput = new Kafka.Producer({
    'client.id': 'kafka-test',
    'metadata.broker.list': kafkaBrokerList,
    'enable.idempotence': true
  });
  producerInput.setPollInterval(100);
  producerInput.connect({}, connectedCb);

  producerTras = new Kafka.Producer({
    'client.id': 'kafka-test',
    'metadata.broker.list': kafkaBrokerList,
    'dr_cb': true,
    'debug': 'all',
    'transactional.id': 'noderdkafka_transactions_send_offset',
    'enable.idempotence': true
  });
  producerTras.setPollInterval(100);
  producerTras.connect({}, connectedCb);

  consumerTrans = new Kafka.KafkaConsumer({
    'metadata.broker.list': kafkaBrokerList,
    'group.id': 'gropu_transaction_consumer',
    'enable.auto.commit': false
  }, {
    'auto.offset.reset': 'earliest',
  });
  consumerTrans.connect({}, connectedCb);
});
|
||||||
|
|
||||||
|
after(function (done) {
  let connected = 2;

  // Count down once per client; signal mocha when both are down.
  function countDown() {
    connected--;
    if (connected === 0) {
      done();
    }
  }

  function execDisconnect(client) {
    // BUG FIX: isConnected is a method on node-rdkafka clients; the
    // original tested the function object itself (`!client.isConnected`),
    // which is always false, so the "already disconnected" branch was
    // unreachable and disconnect() was always invoked.
    if (!client.isConnected()) {
      countDown();
    } else {
      client.disconnect(function() {
        countDown();
      });
    }
  }

  execDisconnect(producerTras);
  execDisconnect(consumerTrans);
});
|
||||||
|
|
||||||
|
it('should init transactions', function(done) {
  // initTransactions reports success with a null error; forward the
  // error (or null) straight to mocha.
  producerTras.initTransactions(TRANSACTIONS_TIMEOUT_MS, function(err) {
    done(err);
  });
});
|
||||||
|
|
||||||
|
it('should complete transaction', function(done) {
  // Poll until a message arrives: 'A' is skipped, 'B' is re-produced
  // inside a transaction whose consumer offsets are committed
  // atomically with it.
  function readMessage() {
    consumerTrans.consume(1, function(err, messages) {
      if (err) {
        return done(err);
      }
      if (messages.length === 0) {
        return readMessage();
      }

      var value = messages[0].value.toString();
      if (value === 'A') { // skip first message
        return readMessage();
      }
      if (value !== 'B') {
        return done('Expected B');
      }

      producerTras.beginTransaction(function (err) {
        if (err) {
          return done(err);
        }
        producerTras.produce(topicOut, -1, Buffer.from(value));
        var position = consumerTrans.position();
        producerTras.sendOffsetsToTransaction(position, consumerTrans, function(err) {
          if (err) {
            return done(err);
          }
          producerTras.commitTransaction(function(err) {
            if (err) {
              return done(err);
            }
            consumerTrans.committed(5000, function(err, tpo) {
              if (err) {
                return done(err);
              }
              // The committed offsets must exactly match the position
              // handed to the transaction.
              if (JSON.stringify(position) !== JSON.stringify(tpo)) {
                return done('Committed mismatch');
              }
              done();
            });
          });
        });
      });
    });
  }
  readMessage();
});
|
||||||
|
|
||||||
|
describe('abort transaction', function() {
  var lastConsumerTransPosition;

  before(function(done) {
    // Read until 'C' arrives, then open (but do NOT commit) a
    // transaction that produces it and attaches the consumer offsets;
    // the tests below observe it before and after the abort.
    function readMessage() {
      consumerTrans.consume(1, function(err, messages) {
        if (err) {
          return done(err);
        }
        if (messages.length === 0) {
          return readMessage();
        }

        var value = messages[0].value.toString();
        if (value !== 'C') {
          return done('Expected C');
        }
        producerTras.beginTransaction(function (err) {
          if (err) {
            return done(err);
          }
          producerTras.produce(topicOut, -1, Buffer.from(value));
          lastConsumerTransPosition = consumerTrans.position();
          producerTras.sendOffsetsToTransaction(lastConsumerTransPosition, consumerTrans, function(err) {
            if (err) {
              return done(err);
            }
            done();
          });
        });
      });
    }
    readMessage();
  });

  it('should consume committed and uncommitted for read_uncommitted', function(done) {
    var allMsgs = [];
    var consumer = new Kafka.KafkaConsumer({
      'metadata.broker.list': kafkaBrokerList,
      'group.id': 'group_read_uncommitted',
      'enable.auto.commit': false,
      'isolation.level': 'read_uncommitted'
    }, {
      'auto.offset.reset': 'earliest',
    });
    consumer.connect({}, function(err) {
      if (err) {
        return done(err);
      }
      consumer.subscribe([topicOut]);
      consumer.consume();
    });
    consumer.on('data', function(msg) {
      allMsgs.push(msg.value.toString());
      // both B and C must be consumed
      if (allMsgs.length === 2 && allMsgs[0] === 'B' && allMsgs[1] === 'C') {
        consumer.disconnect(function(err) {
          if (err) {
            return done(err);
          }
          done();
        });
      }
    });
  });

  it('should consume only committed for read_committed', function(done) {
    var allMsgs = [];
    var consumer = new Kafka.KafkaConsumer({
      'metadata.broker.list': kafkaBrokerList,
      'group.id': 'group_read_committed',
      'enable.partition.eof': true,
      'enable.auto.commit': false,
      'isolation.level': 'read_committed'
    }, {
      'auto.offset.reset': 'earliest',
    });
    consumer.connect({}, function(err) {
      if (err) {
        return done(err);
      }
      consumer.subscribe([topicOut]);
      consumer.consume();
    });
    consumer.on('data', function(msg) {
      allMsgs.push(msg.value.toString());
    });
    consumer.on('partition.eof', function(eof) {
      // Only the committed 'B' may be visible when EOF is reached.
      if (allMsgs.length === 1 && allMsgs[0] === 'B') {
        consumer.disconnect(function(err) {
          if (err) {
            return done(err);
          }
          done();
        });
      } else {
        done('Expected only B');
        return;
      }
    });
  });

  it('should abort transaction', function(done) {
    producerTras.abortTransaction(function(err) {
      if (err) {
        return done(err);
      }
      consumerTrans.committed(5000, function(err, tpo) {
        if (err) {
          return done(err);
        }
        // After the abort, the committed offset must still be behind
        // the position the aborted transaction tried to commit.
        if (lastConsumerTransPosition[0].offset <= tpo[0].offset) {
          return done('Committed mismatch');
        }
        done();
      });
    });
  });

  it('should consume only committed', function(done) {
    var gotB = false;
    var consumer = new Kafka.KafkaConsumer({
      'metadata.broker.list': kafkaBrokerList,
      'group.id': 'group_default',
      'enable.partition.eof': true,
      'enable.auto.commit': false,
    }, {
      'auto.offset.reset': 'earliest',
    });
    consumer.connect({}, function(err) {
      if (err) {
        return done(err);
      }
      consumer.subscribe([topicOut]);
      consumer.consume();
    });
    consumer.on('data', function(msg) {
      if (msg.value.toString() !== 'B') {
        return done('Expected B');
      }
      gotB = true;
    });
    consumer.on('partition.eof', function(eof) {
      if (!gotB) {
        return done('Expected B');
      }
      consumer.disconnect(function(err) {
        if (err) {
          return done(err);
        }
        done();
      });
    });
  });
});
|
||||||
|
});
|
300
e2e/producer.spec.js
Normal file
300
e2e/producer.spec.js
Normal file
@ -0,0 +1,300 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Kafka = require('../');
|
||||||
|
var t = require('assert');
|
||||||
|
var crypto = require('crypto');
|
||||||
|
|
||||||
|
var eventListener = require('./listener');
|
||||||
|
|
||||||
|
var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092';
|
||||||
|
|
||||||
|
var serviceStopped = false;
|
||||||
|
|
||||||
|
describe('Producer', function() {
|
||||||
|
|
||||||
|
var producer;
|
||||||
|
|
||||||
|
describe('with dr_cb', function() {
  beforeEach(function(done) {
    producer = new Kafka.Producer({
      'client.id': 'kafka-test',
      'metadata.broker.list': kafkaBrokerList,
      'dr_cb': true,
      'debug': 'all'
    });
    producer.connect({}, function(err) {
      t.ifError(err);
      done();
    });

    eventListener(producer);
  });

  afterEach(function(done) {
    producer.disconnect(function() {
      done();
    });
  });

  it('should connect to Kafka', function(done) {
    producer.getMetadata({}, function(err, metadata) {
      t.ifError(err);
      t.ok(metadata);

      // Ensure it is in the correct format
      t.ok(metadata.orig_broker_name, 'Broker name is not set');
      t.notStrictEqual(metadata.orig_broker_id, undefined, 'Broker id is not set');
      t.equal(Array.isArray(metadata.brokers), true);
      t.equal(Array.isArray(metadata.topics), true);

      done();
    });
  });

  it('should produce a message with a null payload and null key', function(done) {
    this.timeout(3000);

    // Poll regularly so the delivery report is actually dispatched.
    var tt = setInterval(function() {
      producer.poll();
    }, 200);

    producer.once('delivery-report', function(err, report) {
      clearInterval(tt);
      t.ifError(err);
      t.notStrictEqual(report, undefined);
      t.strictEqual(typeof report.topic, 'string');
      t.strictEqual(typeof report.partition, 'number');
      t.strictEqual(typeof report.offset, 'number');
      t.strictEqual(report.key, null);
      done();
    });

    producer.produce('test', null, null, null);
  });

  it('should produce a message with a payload and key', function(done) {
    this.timeout(3000);

    var tt = setInterval(function() {
      producer.poll();
    }, 200);

    producer.once('delivery-report', function(err, report) {
      clearInterval(tt);
      t.ifError(err);
      t.notStrictEqual(report, undefined);
      // With dr_cb (not dr_msg_cb) the payload is not echoed back.
      t.strictEqual(report.value, undefined);
      t.strictEqual(typeof report.topic, 'string');
      t.strictEqual(typeof report.partition, 'number');
      t.strictEqual(typeof report.offset, 'number');
      t.equal(report.key, 'key');
      done();
    });

    producer.produce('test', null, Buffer.from('value'), 'key');
  });

  it('should produce a message with a payload and key buffer', function(done) {
    this.timeout(3000);

    var tt = setInterval(function() {
      producer.poll();
    }, 200);

    producer.once('delivery-report', function(err, report) {
      clearInterval(tt);
      t.ifError(err);
      t.notStrictEqual(report, undefined);
      t.strictEqual(report.value, undefined);
      t.strictEqual(typeof report.topic, 'string');
      t.strictEqual(typeof report.partition, 'number');
      t.strictEqual(typeof report.offset, 'number');
      // The key buffer contains an embedded NUL; length must survive.
      t.equal(report.key.length > 3, true);
      done();
    });

    producer.produce('test', null, Buffer.from('value'), Buffer.from('key\0s'));
  });

  it('should produce a message with an opaque', function(done) {
    this.timeout(3000);

    var tt = setInterval(function() {
      producer.poll();
    }, 200);

    producer.once('delivery-report', function(err, report) {
      clearInterval(tt);
      t.ifError(err);
      t.notStrictEqual(report, undefined);
      t.strictEqual(typeof report.topic, 'string');
      t.strictEqual(typeof report.partition, 'number');
      t.strictEqual(typeof report.offset, 'number');
      t.equal(report.opaque, 'opaque');
      done();
    });

    producer.produce('test', null, Buffer.from('value'), null, null, 'opaque');
  });


  it('should get 100% deliverability', function(done) {
    this.timeout(3000);

    var total = 0;
    var max = 10000;
    var verified_received = 0;

    var tt = setInterval(function() {
      producer.poll();
    }, 200);

    producer
      .on('delivery-report', function(err, report) {
        t.ifError(err);
        t.notStrictEqual(report, undefined);
        t.strictEqual(typeof report.topic, 'string');
        t.strictEqual(typeof report.partition, 'number');
        t.strictEqual(typeof report.offset, 'number');
        verified_received++;
        if (verified_received === max) {
          clearInterval(tt);
          done();
        }
      });

    // Produce
    // Off-by-one fix: the original loop condition `total <= max` sent
    // max + 1 messages while the handler completed at max, so the last
    // report was never verified. Send exactly `max` messages.
    for (total = 0; total < max; total++) {
      producer.produce('test', null, Buffer.from('message ' + total), null);
    }

  });

});
|
||||||
|
|
||||||
|
describe('with_dr_msg_cb', function() {
|
||||||
|
beforeEach(function(done) {
  producer = new Kafka.Producer({
    'client.id': 'kafka-test',
    'metadata.broker.list': kafkaBrokerList,
    'dr_msg_cb': true,
    'debug': 'all'
  });

  // Debug listeners are a no-op unless DEBUG is set.
  eventListener(producer);

  producer.connect({}, function(err) {
    t.ifError(err);
    done();
  });
});

afterEach(function(done) {
  producer.disconnect(function onDisconnected() {
    done();
  });
});
|
||||||
|
|
||||||
|
it('should produce a message with a payload and key', function(done) {
  this.timeout(3000);

  // Poll regularly so the delivery report callback actually fires.
  var poller = setInterval(function() {
    producer.poll();
  }, 200);

  producer.once('delivery-report', function(err, report) {
    clearInterval(poller);
    t.ifError(err);
    t.notStrictEqual(report, undefined);
    t.strictEqual(typeof report.topic, 'string');
    t.strictEqual(typeof report.partition, 'number');
    t.strictEqual(typeof report.offset, 'number');
    // With dr_msg_cb enabled the report echoes key and value buffers.
    t.ok(report.key.toString(), 'key');
    t.equal(report.value.toString(), 'hai');
    done();
  });

  producer.produce('test', null, Buffer.from('hai'), 'key');
});
|
||||||
|
|
||||||
|
it('should produce a message with an empty payload and empty key (https://github.com/Blizzard/node-rdkafka/issues/117)', function(done) {
|
||||||
|
this.timeout(3000);
|
||||||
|
|
||||||
|
var tt = setInterval(function() {
|
||||||
|
producer.poll();
|
||||||
|
}, 200);
|
||||||
|
|
||||||
|
producer.once('delivery-report', function(err, report) {
|
||||||
|
clearInterval(tt);
|
||||||
|
t.ifError(err);
|
||||||
|
t.notStrictEqual(report, undefined);
|
||||||
|
|
||||||
|
t.strictEqual(typeof report.topic, 'string');
|
||||||
|
t.strictEqual(typeof report.partition, 'number');
|
||||||
|
t.strictEqual(typeof report.offset, 'number');
|
||||||
|
t.equal(report.key.toString(), '', 'key should be an empty string');
|
||||||
|
t.strictEqual(report.value.toString(), '', 'payload should be an empty string');
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
|
||||||
|
producer.produce('test', null, Buffer.from(''), '');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should produce a message with a null payload and null key (https://github.com/Blizzard/node-rdkafka/issues/117)', function(done) {
|
||||||
|
this.timeout(3000);
|
||||||
|
|
||||||
|
producer.setPollInterval(10);
|
||||||
|
|
||||||
|
producer.once('delivery-report', function(err, report) {
|
||||||
|
t.ifError(err);
|
||||||
|
t.notStrictEqual(report, undefined);
|
||||||
|
|
||||||
|
t.strictEqual(typeof report.topic, 'string');
|
||||||
|
t.strictEqual(typeof report.partition, 'number');
|
||||||
|
t.strictEqual(typeof report.offset, 'number');
|
||||||
|
t.strictEqual(report.key, null, 'key should be null');
|
||||||
|
t.strictEqual(report.value, null, 'payload should be null');
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
|
||||||
|
producer.produce('test', null, null, null);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should produce an int64 key (https://github.com/Blizzard/node-rdkafka/issues/208)', function(done) {
|
||||||
|
|
||||||
|
var v1 = 0x0000000000000084;
|
||||||
|
var arr = new Uint8Array(8);
|
||||||
|
arr[0] = 0x00;
|
||||||
|
arr[1] = 0x00;
|
||||||
|
arr[2] = 0x00;
|
||||||
|
arr[3] = 0x00;
|
||||||
|
arr[4] = 0x00;
|
||||||
|
arr[5] = 0x00;
|
||||||
|
arr[6] = 0x00;
|
||||||
|
arr[7] = 84;
|
||||||
|
var buf = Buffer.from(arr.buffer);
|
||||||
|
|
||||||
|
producer.setPollInterval(10);
|
||||||
|
|
||||||
|
producer.once('delivery-report', function(err, report) {
|
||||||
|
t.ifError(err);
|
||||||
|
t.notStrictEqual(report, undefined);
|
||||||
|
|
||||||
|
t.deepEqual(buf, report.key);
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
|
||||||
|
producer.produce('test', null, null, Buffer.from(arr.buffer));
|
||||||
|
|
||||||
|
this.timeout(3000);
|
||||||
|
});
|
||||||
|
|
||||||
|
});
|
||||||
|
|
||||||
|
});
|
345
errors.d.ts
vendored
Normal file
345
errors.d.ts
vendored
Normal file
@ -0,0 +1,345 @@
|
|||||||
|
// ====== Generated from librdkafka 1.6.1 file src-cpp/rdkafkacpp.h ======
|
||||||
|
export const CODES: { ERRORS: {
|
||||||
|
/* Internal errors to rdkafka: */
|
||||||
|
/** Begin internal error codes (**-200**) */
|
||||||
|
ERR__BEGIN: number,
|
||||||
|
/** Received message is incorrect (**-199**) */
|
||||||
|
ERR__BAD_MSG: number,
|
||||||
|
/** Bad/unknown compression (**-198**) */
|
||||||
|
ERR__BAD_COMPRESSION: number,
|
||||||
|
/** Broker is going away (**-197**) */
|
||||||
|
ERR__DESTROY: number,
|
||||||
|
/** Generic failure (**-196**) */
|
||||||
|
ERR__FAIL: number,
|
||||||
|
/** Broker transport failure (**-195**) */
|
||||||
|
ERR__TRANSPORT: number,
|
||||||
|
/** Critical system resource (**-194**) */
|
||||||
|
ERR__CRIT_SYS_RESOURCE: number,
|
||||||
|
/** Failed to resolve broker (**-193**) */
|
||||||
|
ERR__RESOLVE: number,
|
||||||
|
/** Produced message timed out (**-192**) */
|
||||||
|
ERR__MSG_TIMED_OUT: number,
|
||||||
|
/** Reached the end of the topic+partition queue on
|
||||||
|
* the broker. Not really an error.
|
||||||
|
* This event is disabled by default,
|
||||||
|
* see the `enable.partition.eof` configuration property (**-191**) */
|
||||||
|
ERR__PARTITION_EOF: number,
|
||||||
|
/** Permanent: Partition does not exist in cluster (**-190**) */
|
||||||
|
ERR__UNKNOWN_PARTITION: number,
|
||||||
|
/** File or filesystem error (**-189**) */
|
||||||
|
ERR__FS: number,
|
||||||
|
/** Permanent: Topic does not exist in cluster (**-188**) */
|
||||||
|
ERR__UNKNOWN_TOPIC: number,
|
||||||
|
/** All broker connections are down (**-187**) */
|
||||||
|
ERR__ALL_BROKERS_DOWN: number,
|
||||||
|
/** Invalid argument, or invalid configuration (**-186**) */
|
||||||
|
ERR__INVALID_ARG: number,
|
||||||
|
/** Operation timed out (**-185**) */
|
||||||
|
ERR__TIMED_OUT: number,
|
||||||
|
/** Queue is full (**-184**) */
|
||||||
|
ERR__QUEUE_FULL: number,
|
||||||
|
/** ISR count < required.acks (**-183**) */
|
||||||
|
ERR__ISR_INSUFF: number,
|
||||||
|
/** Broker node update (**-182**) */
|
||||||
|
ERR__NODE_UPDATE: number,
|
||||||
|
/** SSL error (**-181**) */
|
||||||
|
ERR__SSL: number,
|
||||||
|
/** Waiting for coordinator to become available (**-180**) */
|
||||||
|
ERR__WAIT_COORD: number,
|
||||||
|
/** Unknown client group (**-179**) */
|
||||||
|
ERR__UNKNOWN_GROUP: number,
|
||||||
|
/** Operation in progress (**-178**) */
|
||||||
|
ERR__IN_PROGRESS: number,
|
||||||
|
/** Previous operation in progress, wait for it to finish (**-177**) */
|
||||||
|
ERR__PREV_IN_PROGRESS: number,
|
||||||
|
/** This operation would interfere with an existing subscription (**-176**) */
|
||||||
|
ERR__EXISTING_SUBSCRIPTION: number,
|
||||||
|
/** Assigned partitions (rebalance_cb) (**-175**) */
|
||||||
|
ERR__ASSIGN_PARTITIONS: number,
|
||||||
|
/** Revoked partitions (rebalance_cb) (**-174**) */
|
||||||
|
ERR__REVOKE_PARTITIONS: number,
|
||||||
|
/** Conflicting use (**-173**) */
|
||||||
|
ERR__CONFLICT: number,
|
||||||
|
/** Wrong state (**-172**) */
|
||||||
|
ERR__STATE: number,
|
||||||
|
/** Unknown protocol (**-171**) */
|
||||||
|
ERR__UNKNOWN_PROTOCOL: number,
|
||||||
|
/** Not implemented (**-170**) */
|
||||||
|
ERR__NOT_IMPLEMENTED: number,
|
||||||
|
/** Authentication failure (**-169**) */
|
||||||
|
ERR__AUTHENTICATION: number,
|
||||||
|
/** No stored offset (**-168**) */
|
||||||
|
ERR__NO_OFFSET: number,
|
||||||
|
/** Outdated (**-167**) */
|
||||||
|
ERR__OUTDATED: number,
|
||||||
|
/** Timed out in queue (**-166**) */
|
||||||
|
ERR__TIMED_OUT_QUEUE: number,
|
||||||
|
/** Feature not supported by broker (**-165**) */
|
||||||
|
ERR__UNSUPPORTED_FEATURE: number,
|
||||||
|
/** Awaiting cache update (**-164**) */
|
||||||
|
ERR__WAIT_CACHE: number,
|
||||||
|
/** Operation interrupted (**-163**) */
|
||||||
|
ERR__INTR: number,
|
||||||
|
/** Key serialization error (**-162**) */
|
||||||
|
ERR__KEY_SERIALIZATION: number,
|
||||||
|
/** Value serialization error (**-161**) */
|
||||||
|
ERR__VALUE_SERIALIZATION: number,
|
||||||
|
/** Key deserialization error (**-160**) */
|
||||||
|
ERR__KEY_DESERIALIZATION: number,
|
||||||
|
/** Value deserialization error (**-159**) */
|
||||||
|
ERR__VALUE_DESERIALIZATION: number,
|
||||||
|
/** Partial response (**-158**) */
|
||||||
|
ERR__PARTIAL: number,
|
||||||
|
/** Modification attempted on read-only object (**-157**) */
|
||||||
|
ERR__READ_ONLY: number,
|
||||||
|
/** No such entry / item not found (**-156**) */
|
||||||
|
ERR__NOENT: number,
|
||||||
|
/** Read underflow (**-155**) */
|
||||||
|
ERR__UNDERFLOW: number,
|
||||||
|
/** Invalid type (**-154**) */
|
||||||
|
ERR__INVALID_TYPE: number,
|
||||||
|
/** Retry operation (**-153**) */
|
||||||
|
ERR__RETRY: number,
|
||||||
|
/** Purged in queue (**-152**) */
|
||||||
|
ERR__PURGE_QUEUE: number,
|
||||||
|
/** Purged in flight (**-151**) */
|
||||||
|
ERR__PURGE_INFLIGHT: number,
|
||||||
|
/** Fatal error: see RdKafka::Handle::fatal_error() (**-150**) */
|
||||||
|
ERR__FATAL: number,
|
||||||
|
/** Inconsistent state (**-149**) */
|
||||||
|
ERR__INCONSISTENT: number,
|
||||||
|
/** Gap-less ordering would not be guaranteed if proceeding (**-148**) */
|
||||||
|
ERR__GAPLESS_GUARANTEE: number,
|
||||||
|
/** Maximum poll interval exceeded (**-147**) */
|
||||||
|
ERR__MAX_POLL_EXCEEDED: number,
|
||||||
|
/** Unknown broker (**-146**) */
|
||||||
|
ERR__UNKNOWN_BROKER: number,
|
||||||
|
/** Functionality not configured (**-145**) */
|
||||||
|
ERR__NOT_CONFIGURED: number,
|
||||||
|
/** Instance has been fenced (**-144**) */
|
||||||
|
ERR__FENCED: number,
|
||||||
|
/** Application generated error (**-143**) */
|
||||||
|
ERR__APPLICATION: number,
|
||||||
|
/** Assignment lost (**-142**) */
|
||||||
|
ERR__ASSIGNMENT_LOST: number,
|
||||||
|
/** No operation performed (**-141**) */
|
||||||
|
ERR__NOOP: number,
|
||||||
|
/** No offset to automatically reset to (**-140**) */
|
||||||
|
ERR__AUTO_OFFSET_RESET: number,
|
||||||
|
/** End internal error codes (**-100**) */
|
||||||
|
ERR__END: number,
|
||||||
|
/* Kafka broker errors: */
|
||||||
|
/** Unknown broker error (**-1**) */
|
||||||
|
ERR_UNKNOWN: number,
|
||||||
|
/** Success (**0**) */
|
||||||
|
ERR_NO_ERROR: number,
|
||||||
|
/** Offset out of range (**1**) */
|
||||||
|
ERR_OFFSET_OUT_OF_RANGE: number,
|
||||||
|
/** Invalid message (**2**) */
|
||||||
|
ERR_INVALID_MSG: number,
|
||||||
|
/** Unknown topic or partition (**3**) */
|
||||||
|
ERR_UNKNOWN_TOPIC_OR_PART: number,
|
||||||
|
/** Invalid message size (**4**) */
|
||||||
|
ERR_INVALID_MSG_SIZE: number,
|
||||||
|
/** Leader not available (**5**) */
|
||||||
|
ERR_LEADER_NOT_AVAILABLE: number,
|
||||||
|
/** Not leader for partition (**6**) */
|
||||||
|
ERR_NOT_LEADER_FOR_PARTITION: number,
|
||||||
|
/** Request timed out (**7**) */
|
||||||
|
ERR_REQUEST_TIMED_OUT: number,
|
||||||
|
/** Broker not available (**8**) */
|
||||||
|
ERR_BROKER_NOT_AVAILABLE: number,
|
||||||
|
/** Replica not available (**9**) */
|
||||||
|
ERR_REPLICA_NOT_AVAILABLE: number,
|
||||||
|
/** Message size too large (**10**) */
|
||||||
|
ERR_MSG_SIZE_TOO_LARGE: number,
|
||||||
|
/** StaleControllerEpochCode (**11**) */
|
||||||
|
ERR_STALE_CTRL_EPOCH: number,
|
||||||
|
/** Offset metadata string too large (**12**) */
|
||||||
|
ERR_OFFSET_METADATA_TOO_LARGE: number,
|
||||||
|
/** Broker disconnected before response received (**13**) */
|
||||||
|
ERR_NETWORK_EXCEPTION: number,
|
||||||
|
/** Coordinator load in progress (**14**) */
|
||||||
|
ERR_COORDINATOR_LOAD_IN_PROGRESS: number,
|
||||||
|
/** Group coordinator load in progress (**14**) */
|
||||||
|
ERR_GROUP_LOAD_IN_PROGRESS: number,
|
||||||
|
/** Coordinator not available (**15**) */
|
||||||
|
ERR_COORDINATOR_NOT_AVAILABLE: number,
|
||||||
|
/** Group coordinator not available (**15**) */
|
||||||
|
ERR_GROUP_COORDINATOR_NOT_AVAILABLE: number,
|
||||||
|
/** Not coordinator (**16**) */
|
||||||
|
ERR_NOT_COORDINATOR: number,
|
||||||
|
/** Not coordinator for group (**16**) */
|
||||||
|
ERR_NOT_COORDINATOR_FOR_GROUP: number,
|
||||||
|
/** Invalid topic (**17**) */
|
||||||
|
ERR_TOPIC_EXCEPTION: number,
|
||||||
|
/** Message batch larger than configured server segment size (**18**) */
|
||||||
|
ERR_RECORD_LIST_TOO_LARGE: number,
|
||||||
|
/** Not enough in-sync replicas (**19**) */
|
||||||
|
ERR_NOT_ENOUGH_REPLICAS: number,
|
||||||
|
/** Message(s) written to insufficient number of in-sync replicas (**20**) */
|
||||||
|
ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND: number,
|
||||||
|
/** Invalid required acks value (**21**) */
|
||||||
|
ERR_INVALID_REQUIRED_ACKS: number,
|
||||||
|
/** Specified group generation id is not valid (**22**) */
|
||||||
|
ERR_ILLEGAL_GENERATION: number,
|
||||||
|
/** Inconsistent group protocol (**23**) */
|
||||||
|
ERR_INCONSISTENT_GROUP_PROTOCOL: number,
|
||||||
|
/** Invalid group.id (**24**) */
|
||||||
|
ERR_INVALID_GROUP_ID: number,
|
||||||
|
/** Unknown member (**25**) */
|
||||||
|
ERR_UNKNOWN_MEMBER_ID: number,
|
||||||
|
/** Invalid session timeout (**26**) */
|
||||||
|
ERR_INVALID_SESSION_TIMEOUT: number,
|
||||||
|
/** Group rebalance in progress (**27**) */
|
||||||
|
ERR_REBALANCE_IN_PROGRESS: number,
|
||||||
|
/** Commit offset data size is not valid (**28**) */
|
||||||
|
ERR_INVALID_COMMIT_OFFSET_SIZE: number,
|
||||||
|
/** Topic authorization failed (**29**) */
|
||||||
|
ERR_TOPIC_AUTHORIZATION_FAILED: number,
|
||||||
|
/** Group authorization failed (**30**) */
|
||||||
|
ERR_GROUP_AUTHORIZATION_FAILED: number,
|
||||||
|
/** Cluster authorization failed (**31**) */
|
||||||
|
ERR_CLUSTER_AUTHORIZATION_FAILED: number,
|
||||||
|
/** Invalid timestamp (**32**) */
|
||||||
|
ERR_INVALID_TIMESTAMP: number,
|
||||||
|
/** Unsupported SASL mechanism (**33**) */
|
||||||
|
ERR_UNSUPPORTED_SASL_MECHANISM: number,
|
||||||
|
/** Illegal SASL state (**34**) */
|
||||||
|
ERR_ILLEGAL_SASL_STATE: number,
|
||||||
|
/** Unuspported version (**35**) */
|
||||||
|
ERR_UNSUPPORTED_VERSION: number,
|
||||||
|
/** Topic already exists (**36**) */
|
||||||
|
ERR_TOPIC_ALREADY_EXISTS: number,
|
||||||
|
/** Invalid number of partitions (**37**) */
|
||||||
|
ERR_INVALID_PARTITIONS: number,
|
||||||
|
/** Invalid replication factor (**38**) */
|
||||||
|
ERR_INVALID_REPLICATION_FACTOR: number,
|
||||||
|
/** Invalid replica assignment (**39**) */
|
||||||
|
ERR_INVALID_REPLICA_ASSIGNMENT: number,
|
||||||
|
/** Invalid config (**40**) */
|
||||||
|
ERR_INVALID_CONFIG: number,
|
||||||
|
/** Not controller for cluster (**41**) */
|
||||||
|
ERR_NOT_CONTROLLER: number,
|
||||||
|
/** Invalid request (**42**) */
|
||||||
|
ERR_INVALID_REQUEST: number,
|
||||||
|
/** Message format on broker does not support request (**43**) */
|
||||||
|
ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT: number,
|
||||||
|
/** Policy violation (**44**) */
|
||||||
|
ERR_POLICY_VIOLATION: number,
|
||||||
|
/** Broker received an out of order sequence number (**45**) */
|
||||||
|
ERR_OUT_OF_ORDER_SEQUENCE_NUMBER: number,
|
||||||
|
/** Broker received a duplicate sequence number (**46**) */
|
||||||
|
ERR_DUPLICATE_SEQUENCE_NUMBER: number,
|
||||||
|
/** Producer attempted an operation with an old epoch (**47**) */
|
||||||
|
ERR_INVALID_PRODUCER_EPOCH: number,
|
||||||
|
/** Producer attempted a transactional operation in an invalid state (**48**) */
|
||||||
|
ERR_INVALID_TXN_STATE: number,
|
||||||
|
/** Producer attempted to use a producer id which is not
|
||||||
|
* currently assigned to its transactional id (**49**) */
|
||||||
|
ERR_INVALID_PRODUCER_ID_MAPPING: number,
|
||||||
|
/** Transaction timeout is larger than the maximum
|
||||||
|
* value allowed by the broker's max.transaction.timeout.ms (**50**) */
|
||||||
|
ERR_INVALID_TRANSACTION_TIMEOUT: number,
|
||||||
|
/** Producer attempted to update a transaction while another
|
||||||
|
* concurrent operation on the same transaction was ongoing (**51**) */
|
||||||
|
ERR_CONCURRENT_TRANSACTIONS: number,
|
||||||
|
/** Indicates that the transaction coordinator sending a
|
||||||
|
* WriteTxnMarker is no longer the current coordinator for a
|
||||||
|
* given producer (**52**) */
|
||||||
|
ERR_TRANSACTION_COORDINATOR_FENCED: number,
|
||||||
|
/** Transactional Id authorization failed (**53**) */
|
||||||
|
ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: number,
|
||||||
|
/** Security features are disabled (**54**) */
|
||||||
|
ERR_SECURITY_DISABLED: number,
|
||||||
|
/** Operation not attempted (**55**) */
|
||||||
|
ERR_OPERATION_NOT_ATTEMPTED: number,
|
||||||
|
/** Disk error when trying to access log file on the disk (**56**) */
|
||||||
|
ERR_KAFKA_STORAGE_ERROR: number,
|
||||||
|
/** The user-specified log directory is not found in the broker config (**57**) */
|
||||||
|
ERR_LOG_DIR_NOT_FOUND: number,
|
||||||
|
/** SASL Authentication failed (**58**) */
|
||||||
|
ERR_SASL_AUTHENTICATION_FAILED: number,
|
||||||
|
/** Unknown Producer Id (**59**) */
|
||||||
|
ERR_UNKNOWN_PRODUCER_ID: number,
|
||||||
|
/** Partition reassignment is in progress (**60**) */
|
||||||
|
ERR_REASSIGNMENT_IN_PROGRESS: number,
|
||||||
|
/** Delegation Token feature is not enabled (**61**) */
|
||||||
|
ERR_DELEGATION_TOKEN_AUTH_DISABLED: number,
|
||||||
|
/** Delegation Token is not found on server (**62**) */
|
||||||
|
ERR_DELEGATION_TOKEN_NOT_FOUND: number,
|
||||||
|
/** Specified Principal is not valid Owner/Renewer (**63**) */
|
||||||
|
ERR_DELEGATION_TOKEN_OWNER_MISMATCH: number,
|
||||||
|
/** Delegation Token requests are not allowed on this connection (**64**) */
|
||||||
|
ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED: number,
|
||||||
|
/** Delegation Token authorization failed (**65**) */
|
||||||
|
ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED: number,
|
||||||
|
/** Delegation Token is expired (**66**) */
|
||||||
|
ERR_DELEGATION_TOKEN_EXPIRED: number,
|
||||||
|
/** Supplied principalType is not supported (**67**) */
|
||||||
|
ERR_INVALID_PRINCIPAL_TYPE: number,
|
||||||
|
/** The group is not empty (**68**) */
|
||||||
|
ERR_NON_EMPTY_GROUP: number,
|
||||||
|
/** The group id does not exist (**69**) */
|
||||||
|
ERR_GROUP_ID_NOT_FOUND: number,
|
||||||
|
/** The fetch session ID was not found (**70**) */
|
||||||
|
ERR_FETCH_SESSION_ID_NOT_FOUND: number,
|
||||||
|
/** The fetch session epoch is invalid (**71**) */
|
||||||
|
ERR_INVALID_FETCH_SESSION_EPOCH: number,
|
||||||
|
/** No matching listener (**72**) */
|
||||||
|
ERR_LISTENER_NOT_FOUND: number,
|
||||||
|
/** Topic deletion is disabled (**73**) */
|
||||||
|
ERR_TOPIC_DELETION_DISABLED: number,
|
||||||
|
/** Leader epoch is older than broker epoch (**74**) */
|
||||||
|
ERR_FENCED_LEADER_EPOCH: number,
|
||||||
|
/** Leader epoch is newer than broker epoch (**75**) */
|
||||||
|
ERR_UNKNOWN_LEADER_EPOCH: number,
|
||||||
|
/** Unsupported compression type (**76**) */
|
||||||
|
ERR_UNSUPPORTED_COMPRESSION_TYPE: number,
|
||||||
|
/** Broker epoch has changed (**77**) */
|
||||||
|
ERR_STALE_BROKER_EPOCH: number,
|
||||||
|
/** Leader high watermark is not caught up (**78**) */
|
||||||
|
ERR_OFFSET_NOT_AVAILABLE: number,
|
||||||
|
/** Group member needs a valid member ID (**79**) */
|
||||||
|
ERR_MEMBER_ID_REQUIRED: number,
|
||||||
|
/** Preferred leader was not available (**80**) */
|
||||||
|
ERR_PREFERRED_LEADER_NOT_AVAILABLE: number,
|
||||||
|
/** Consumer group has reached maximum size (**81**) */
|
||||||
|
ERR_GROUP_MAX_SIZE_REACHED: number,
|
||||||
|
/** Static consumer fenced by other consumer with same
|
||||||
|
* group.instance.id (**82**) */
|
||||||
|
ERR_FENCED_INSTANCE_ID: number,
|
||||||
|
/** Eligible partition leaders are not available (**83**) */
|
||||||
|
ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE: number,
|
||||||
|
/** Leader election not needed for topic partition (**84**) */
|
||||||
|
ERR_ELECTION_NOT_NEEDED: number,
|
||||||
|
/** No partition reassignment is in progress (**85**) */
|
||||||
|
ERR_NO_REASSIGNMENT_IN_PROGRESS: number,
|
||||||
|
/** Deleting offsets of a topic while the consumer group is
|
||||||
|
* subscribed to it (**86**) */
|
||||||
|
ERR_GROUP_SUBSCRIBED_TO_TOPIC: number,
|
||||||
|
/** Broker failed to validate record (**87**) */
|
||||||
|
ERR_INVALID_RECORD: number,
|
||||||
|
/** There are unstable offsets that need to be cleared (**88**) */
|
||||||
|
ERR_UNSTABLE_OFFSET_COMMIT: number,
|
||||||
|
/** Throttling quota has been exceeded (**89**) */
|
||||||
|
ERR_THROTTLING_QUOTA_EXCEEDED: number,
|
||||||
|
/** There is a newer producer with the same transactionalId
|
||||||
|
* which fences the current one (**90**) */
|
||||||
|
ERR_PRODUCER_FENCED: number,
|
||||||
|
/** Request illegally referred to resource that does not exist (**91**) */
|
||||||
|
ERR_RESOURCE_NOT_FOUND: number,
|
||||||
|
/** Request illegally referred to the same resource twice (**92**) */
|
||||||
|
ERR_DUPLICATE_RESOURCE: number,
|
||||||
|
/** Requested credential would not meet criteria for acceptability (**93**) */
|
||||||
|
ERR_UNACCEPTABLE_CREDENTIAL: number,
|
||||||
|
/** Indicates that the either the sender or recipient of a
|
||||||
|
* voter-only request is not one of the expected voters (**94**) */
|
||||||
|
ERR_INCONSISTENT_VOTER_SET: number,
|
||||||
|
/** Invalid update version (**95**) */
|
||||||
|
ERR_INVALID_UPDATE_VERSION: number,
|
||||||
|
/** Unable to update finalized features due to server error (**96**) */
|
||||||
|
ERR_FEATURE_UPDATE_FAILED: number,
|
||||||
|
/** Request principal deserialization failed during forwarding (**97**) */
|
||||||
|
ERR_PRINCIPAL_DESERIALIZATION_FAILURE: number,
|
||||||
|
}}
|
347
index.d.ts
vendored
Normal file
347
index.d.ts
vendored
Normal file
@ -0,0 +1,347 @@
|
|||||||
|
import { Readable, ReadableOptions, Writable, WritableOptions } from 'stream';
|
||||||
|
import { EventEmitter } from 'events';
|
||||||
|
import {
|
||||||
|
GlobalConfig,
|
||||||
|
TopicConfig,
|
||||||
|
ConsumerGlobalConfig,
|
||||||
|
ConsumerTopicConfig,
|
||||||
|
ProducerGlobalConfig,
|
||||||
|
ProducerTopicConfig,
|
||||||
|
} from './config';
|
||||||
|
|
||||||
|
export * from './config';
|
||||||
|
export * from './errors';
|
||||||
|
|
||||||
|
export interface LibrdKafkaError {
|
||||||
|
message: string;
|
||||||
|
code: number;
|
||||||
|
errno: number;
|
||||||
|
origin: string;
|
||||||
|
stack?: string;
|
||||||
|
isFatal?: boolean;
|
||||||
|
isRetriable?: boolean;
|
||||||
|
isTxnRequiresAbort?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ReadyInfo {
|
||||||
|
name: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ClientMetrics {
|
||||||
|
connectionOpened: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface MetadataOptions {
|
||||||
|
topic?: string;
|
||||||
|
allTopics?: boolean;
|
||||||
|
timeout?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface BrokerMetadata {
|
||||||
|
id: number;
|
||||||
|
host: string;
|
||||||
|
port: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface PartitionMetadata {
|
||||||
|
id: number;
|
||||||
|
leader: number;
|
||||||
|
replicas: number[];
|
||||||
|
isrs: number[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TopicMetadata {
|
||||||
|
name: string;
|
||||||
|
partitions: PartitionMetadata[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface Metadata {
|
||||||
|
orig_broker_id: number;
|
||||||
|
orig_broker_name: string;
|
||||||
|
topics: TopicMetadata[];
|
||||||
|
brokers: BrokerMetadata[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface WatermarkOffsets{
|
||||||
|
lowOffset: number;
|
||||||
|
highOffset: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TopicPartition {
|
||||||
|
topic: string;
|
||||||
|
partition: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TopicPartitionOffset extends TopicPartition{
|
||||||
|
offset: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export type TopicPartitionTime = TopicPartitionOffset;
|
||||||
|
|
||||||
|
export type EofEvent = TopicPartitionOffset;
|
||||||
|
|
||||||
|
export type Assignment = TopicPartition | TopicPartitionOffset;
|
||||||
|
|
||||||
|
export interface DeliveryReport extends TopicPartitionOffset {
|
||||||
|
value?: MessageValue;
|
||||||
|
size: number;
|
||||||
|
key?: MessageKey;
|
||||||
|
timestamp?: number;
|
||||||
|
opaque?: any;
|
||||||
|
}
|
||||||
|
|
||||||
|
export type NumberNullUndefined = number | null | undefined;
|
||||||
|
|
||||||
|
export type MessageKey = Buffer | string | null | undefined;
|
||||||
|
export type MessageHeader = { [key: string]: string | Buffer };
|
||||||
|
export type MessageValue = Buffer | null;
|
||||||
|
export type SubscribeTopic = string | RegExp;
|
||||||
|
export type SubscribeTopicList = SubscribeTopic[];
|
||||||
|
|
||||||
|
export interface Message extends TopicPartitionOffset {
|
||||||
|
value: MessageValue;
|
||||||
|
size: number;
|
||||||
|
topic: string;
|
||||||
|
key?: MessageKey;
|
||||||
|
timestamp?: number;
|
||||||
|
headers?: MessageHeader[];
|
||||||
|
opaque?: any;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ReadStreamOptions extends ReadableOptions {
|
||||||
|
topics: SubscribeTopicList | SubscribeTopic | ((metadata: Metadata) => SubscribeTopicList);
|
||||||
|
waitInterval?: number;
|
||||||
|
fetchSize?: number;
|
||||||
|
objectMode?: boolean;
|
||||||
|
highWaterMark?: number;
|
||||||
|
autoClose?: boolean;
|
||||||
|
streamAsBatch?: boolean;
|
||||||
|
connectOptions?: any;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface WriteStreamOptions extends WritableOptions {
|
||||||
|
encoding?: string;
|
||||||
|
objectMode?: boolean;
|
||||||
|
topic?: string;
|
||||||
|
autoClose?: boolean;
|
||||||
|
pollInterval?: number;
|
||||||
|
connectOptions?: any;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ProducerStream extends Writable {
|
||||||
|
producer: Producer;
|
||||||
|
connect(metadataOptions?: MetadataOptions): void;
|
||||||
|
close(cb?: () => void): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ConsumerStream extends Readable {
|
||||||
|
consumer: KafkaConsumer;
|
||||||
|
connect(options: ConsumerGlobalConfig): void;
|
||||||
|
close(cb?: () => void): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
type KafkaClientEvents = 'disconnected' | 'ready' | 'connection.failure' | 'event.error' | 'event.stats' | 'event.log' | 'event.event' | 'event.throttle';
|
||||||
|
type KafkaConsumerEvents = 'data' | 'partition.eof' | 'rebalance' | 'rebalance.error' | 'subscribed' | 'unsubscribed' | 'unsubscribe' | 'offset.commit' | KafkaClientEvents;
|
||||||
|
type KafkaProducerEvents = 'delivery-report' | KafkaClientEvents;
|
||||||
|
|
||||||
|
type EventListenerMap = {
|
||||||
|
// ### Client
|
||||||
|
// connectivity events
|
||||||
|
'disconnected': (metrics: ClientMetrics) => void,
|
||||||
|
'ready': (info: ReadyInfo, metadata: Metadata) => void,
|
||||||
|
'connection.failure': (error: LibrdKafkaError, metrics: ClientMetrics) => void,
|
||||||
|
// event messages
|
||||||
|
'event.error': (error: LibrdKafkaError) => void,
|
||||||
|
'event.stats': (eventData: any) => void,
|
||||||
|
'event.log': (eventData: any) => void,
|
||||||
|
'event.event': (eventData: any) => void,
|
||||||
|
'event.throttle': (eventData: any) => void,
|
||||||
|
// ### Consumer only
|
||||||
|
// domain events
|
||||||
|
'data': (arg: Message) => void,
|
||||||
|
'partition.eof': (arg: EofEvent) => void,
|
||||||
|
'rebalance': (err: LibrdKafkaError, assignments: TopicPartition[]) => void,
|
||||||
|
'rebalance.error': (err: Error) => void,
|
||||||
|
// connectivity events
|
||||||
|
'subscribed': (topics: SubscribeTopicList) => void,
|
||||||
|
'unsubscribe': () => void,
|
||||||
|
'unsubscribed': () => void,
|
||||||
|
// offsets
|
||||||
|
'offset.commit': (error: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void,
|
||||||
|
// ### Producer only
|
||||||
|
// delivery
|
||||||
|
'delivery-report': (error: LibrdKafkaError, report: DeliveryReport) => void,
|
||||||
|
}
|
||||||
|
|
||||||
|
type EventListener<K extends string> = K extends keyof EventListenerMap ? EventListenerMap[K] : never;
|
||||||
|
|
||||||
|
export abstract class Client<Events extends string> extends EventEmitter {
|
||||||
|
constructor(globalConf: GlobalConfig, SubClientType: any, topicConf: TopicConfig);
|
||||||
|
|
||||||
|
connect(metadataOptions?: MetadataOptions, cb?: (err: LibrdKafkaError, data: Metadata) => any): this;
|
||||||
|
|
||||||
|
getClient(): any;
|
||||||
|
|
||||||
|
connectedTime(): number;
|
||||||
|
|
||||||
|
getLastError(): LibrdKafkaError;
|
||||||
|
|
||||||
|
disconnect(cb?: (err: any, data: ClientMetrics) => any): this;
|
||||||
|
disconnect(timeout: number, cb?: (err: any, data: ClientMetrics) => any): this;
|
||||||
|
|
||||||
|
isConnected(): boolean;
|
||||||
|
|
||||||
|
getMetadata(metadataOptions?: MetadataOptions, cb?: (err: LibrdKafkaError, data: Metadata) => any): any;
|
||||||
|
|
||||||
|
queryWatermarkOffsets(topic: string, partition: number, timeout: number, cb?: (err: LibrdKafkaError, offsets: WatermarkOffsets) => any): any;
|
||||||
|
queryWatermarkOffsets(topic: string, partition: number, cb?: (err: LibrdKafkaError, offsets: WatermarkOffsets) => any): any;
|
||||||
|
|
||||||
|
on<E extends Events>(event: E, listener: EventListener<E>): this;
|
||||||
|
once<E extends Events>(event: E, listener: EventListener<E>): this;
|
||||||
|
}
|
||||||
|
|
||||||
|
export class KafkaConsumer extends Client<KafkaConsumerEvents> {
|
||||||
|
constructor(conf: ConsumerGlobalConfig, topicConf: ConsumerTopicConfig);
|
||||||
|
|
||||||
|
assign(assignments: Assignment[]): this;
|
||||||
|
|
||||||
|
assignments(): Assignment[];
|
||||||
|
|
||||||
|
commit(topicPartition: TopicPartitionOffset | TopicPartitionOffset[]): this;
|
||||||
|
commit(): this;
|
||||||
|
|
||||||
|
commitMessage(msg: TopicPartitionOffset): this;
|
||||||
|
|
||||||
|
commitMessageSync(msg: TopicPartitionOffset): this;
|
||||||
|
|
||||||
|
commitSync(topicPartition: TopicPartitionOffset | TopicPartitionOffset[]): this;
|
||||||
|
|
||||||
|
committed(toppars: TopicPartition[], timeout: number, cb: (err: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void): this;
|
||||||
|
committed(timeout: number, cb: (err: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void): this;
|
||||||
|
|
||||||
|
consume(number: number, cb?: (err: LibrdKafkaError, messages: Message[]) => void): void;
|
||||||
|
consume(cb: (err: LibrdKafkaError, messages: Message[]) => void): void;
|
||||||
|
consume(): void;
|
||||||
|
|
||||||
|
getWatermarkOffsets(topic: string, partition: number): WatermarkOffsets;
|
||||||
|
|
||||||
|
offsetsStore(topicPartitions: TopicPartitionOffset[]): any;
|
||||||
|
|
||||||
|
pause(topicPartitions: TopicPartition[]): any;
|
||||||
|
|
||||||
|
position(toppars?: TopicPartition[]): TopicPartitionOffset[];
|
||||||
|
|
||||||
|
resume(topicPartitions: TopicPartition[]): any;
|
||||||
|
|
||||||
|
seek(toppar: TopicPartitionOffset, timeout: number | null, cb: (err: LibrdKafkaError) => void): this;
|
||||||
|
|
||||||
|
setDefaultConsumeTimeout(timeoutMs: number): void;
|
||||||
|
|
||||||
|
setDefaultConsumeLoopTimeoutDelay(timeoutMs: number): void;
|
||||||
|
|
||||||
|
subscribe(topics: SubscribeTopicList): this;
|
||||||
|
|
||||||
|
subscription(): string[];
|
||||||
|
|
||||||
|
unassign(): this;
|
||||||
|
|
||||||
|
unsubscribe(): this;
|
||||||
|
|
||||||
|
offsetsForTimes(topicPartitions: TopicPartitionTime[], timeout: number, cb?: (err: LibrdKafkaError, offsets: TopicPartitionOffset[]) => any): void;
|
||||||
|
offsetsForTimes(topicPartitions: TopicPartitionTime[], cb?: (err: LibrdKafkaError, offsets: TopicPartitionOffset[]) => any): void;
|
||||||
|
|
||||||
|
static createReadStream(conf: ConsumerGlobalConfig, topicConfig: ConsumerTopicConfig, streamOptions: ReadStreamOptions | number): ConsumerStream;
|
||||||
|
}
|
||||||
|
|
||||||
|
export class Producer extends Client<KafkaProducerEvents> {
  constructor(conf: ProducerGlobalConfig, topicConf?: ProducerTopicConfig);

  /** Flush outstanding messages; `cb` fires when done or on error. */
  flush(timeout?: NumberNullUndefined, cb?: (err: LibrdKafkaError) => void): this;

  /** Poll the producer for delivery reports and other queued events. */
  poll(): this;

  /** Enqueue a single message for delivery to `topic`. */
  produce(topic: string, partition: NumberNullUndefined, message: MessageValue, key?: MessageKey, timestamp?: NumberNullUndefined, opaque?: any, headers?: MessageHeader[]): any;

  /** Automatically poll every `interval` milliseconds. */
  setPollInterval(interval: number): this;

  static createWriteStream(conf: ProducerGlobalConfig, topicConf: ProducerTopicConfig, streamOptions: WriteStreamOptions): ProducerStream;

  // Transactional API. Each method has an overload that accepts an
  // explicit timeout in milliseconds before the callback.
  initTransactions(cb: (err: LibrdKafkaError) => void): void;
  initTransactions(timeout: number, cb: (err: LibrdKafkaError) => void): void;
  beginTransaction(cb: (err: LibrdKafkaError) => void): void;
  commitTransaction(cb: (err: LibrdKafkaError) => void): void;
  commitTransaction(timeout: number, cb: (err: LibrdKafkaError) => void): void;
  abortTransaction(cb: (err: LibrdKafkaError) => void): void;
  abortTransaction(timeout: number, cb: (err: LibrdKafkaError) => void): void;
  sendOffsetsToTransaction(offsets: TopicPartitionOffset[], consumer: KafkaConsumer, cb: (err: LibrdKafkaError) => void): void;
  sendOffsetsToTransaction(offsets: TopicPartitionOffset[], consumer: KafkaConsumer, timeout: number, cb: (err: LibrdKafkaError) => void): void;
}
|
||||||
|
|
||||||
|
export class HighLevelProducer extends Producer {
  /**
   * Produce a message; unlike the base Producer, the final argument is a
   * callback that receives the result (and possibly the offset).
   */
  produce(topic: string, partition: NumberNullUndefined, message: any, key: any, timestamp: NumberNullUndefined, callback: (err: any, offset?: NumberNullUndefined) => void): any;
  produce(topic: string, partition: NumberNullUndefined, message: any, key: any, timestamp: NumberNullUndefined, headers: MessageHeader[], callback: (err: any, offset?: NumberNullUndefined) => void): any;

  /** Register the key serializer — node-style callback form or sync/Promise form. */
  setKeySerializer(serializer: (key: any, cb: (err: any, key: MessageKey) => void) => void): void;
  setKeySerializer(serializer: (key: any) => MessageKey | Promise<MessageKey>): void;
  /** Register the value serializer — node-style callback form or sync/Promise form. */
  setValueSerializer(serializer: (value: any, cb: (err: any, value: MessageValue) => void) => void): void;
  setValueSerializer(serializer: (value: any) => MessageValue | Promise<MessageValue>): void;
}
|
||||||
|
|
||||||
|
/** Names of the features available in the native binding — presumably librdkafka builtin.features; verify against binding. */
export const features: string[];

/** Version string of the underlying librdkafka library. */
export const librdkafkaVersion: string;

/** Convenience wrapper equivalent to KafkaConsumer.createReadStream. */
export function createReadStream(conf: ConsumerGlobalConfig, topicConf: ConsumerTopicConfig, streamOptions: ReadStreamOptions | number): ConsumerStream;

/** Convenience wrapper equivalent to Producer.createWriteStream. */
export function createWriteStream(conf: ProducerGlobalConfig, topicConf: ProducerTopicConfig, streamOptions: WriteStreamOptions): ProducerStream;
|
||||||
|
|
||||||
|
/** Specification of a topic to be created through the admin client. */
export interface NewTopic {
  /** Name of the topic to create. */
  topic: string;
  num_partitions: number;
  replication_factor: number;
  /**
   * Per-topic broker configuration. Either the known keys below, or an
   * arbitrary string-to-string map for configs not listed here.
   */
  config?: {
    'cleanup.policy'?: 'delete' | 'compact' | 'delete,compact' | 'compact,delete';
    'compression.type'?: 'gzip' | 'snappy' | 'lz4' | 'zstd' | 'uncompressed' | 'producer';
    'delete.retention.ms'?: string;
    'file.delete.delay.ms'?: string;
    'flush.messages'?: string;
    'flush.ms'?: string;
    'follower.replication.throttled.replicas'?: string;
    'index.interval.bytes'?: string;
    'leader.replication.throttled.replicas'?: string;
    'max.compaction.lag.ms'?: string;
    'max.message.bytes'?: string;
    'message.format.version'?: string;
    'message.timestamp.difference.max.ms'?: string;
    'message.timestamp.type'?: string;
    'min.cleanable.dirty.ratio'?: string;
    'min.compaction.lag.ms'?: string;
    'min.insync.replicas'?: string;
    'preallocate'?: string;
    'retention.bytes'?: string;
    'retention.ms'?: string;
    'segment.bytes'?: string;
    'segment.index.bytes'?: string;
    'segment.jitter.ms'?: string;
    'segment.ms'?: string;
    'unclean.leader.election.enable'?: string;
    'message.downconversion.enable'?: string;
  } | { [cfg: string]: string; };
}
|
||||||
|
|
||||||
|
/** Interface returned by AdminClient.create for administering topics. */
export interface IAdminClient {
  /** Create a topic; the overload with `timeout` bounds the operation in ms. */
  createTopic(topic: NewTopic, cb?: (err: LibrdKafkaError) => void): void;
  createTopic(topic: NewTopic, timeout?: number, cb?: (err: LibrdKafkaError) => void): void;

  /** Delete a topic by name. */
  deleteTopic(topic: string, cb?: (err: LibrdKafkaError) => void): void;
  deleteTopic(topic: string, timeout?: number, cb?: (err: LibrdKafkaError) => void): void;

  /** Grow `topic` to `desiredPartitions` total partitions. */
  createPartitions(topic: string, desiredPartitions: number, cb?: (err: LibrdKafkaError) => void): void;
  createPartitions(topic: string, desiredPartitions: number, timeout?: number, cb?: (err: LibrdKafkaError) => void): void;

  /** Tear down the admin connection. */
  disconnect(): void;
}
|
||||||
|
|
||||||
|
export abstract class AdminClient {
  /** Factory method — the only supported way to obtain an admin client. */
  static create(conf: GlobalConfig): IAdminClient;
}
|
213
lib/admin.js
Normal file
213
lib/admin.js
Normal file
@ -0,0 +1,213 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
'use strict';

// Public API of this module is a factory rather than the constructor,
// because creating an AdminClient immediately opens a broker connection.
module.exports = {
  create: createAdminClient,
};

var Client = require('./client');
var util = require('util');
var Kafka = require('../librdkafka');
var LibrdKafkaError = require('./error');
var shallowCopy = require('./util').shallowCopy;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a new AdminClient for making topics, partitions, and more.
|
||||||
|
*
|
||||||
|
* This is a factory method because it immediately starts an
|
||||||
|
* active handle with the brokers.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
function createAdminClient(conf) {
  var adminClient = new AdminClient(conf);

  // connect() is synchronous for the admin client; wrap() turns a
  // failing return code into a thrown LibrdKafkaError with context.
  LibrdKafkaError.wrap(adminClient.connect(), true);

  // Only reached when the connection succeeded.
  return adminClient;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* AdminClient class for administering Kafka
|
||||||
|
*
|
||||||
|
* This client is the way you can interface with the Kafka Admin APIs.
|
||||||
|
* This class should not be made using the constructor, but instead
|
||||||
|
* should be made using the factory method.
|
||||||
|
*
|
||||||
|
* <code>
|
||||||
|
* var client = AdminClient.create({ ... });
|
||||||
|
* </code>
|
||||||
|
*
|
||||||
|
* Once you instantiate this object, it will have a handle to the kafka broker.
|
||||||
|
* Unlike the other node-rdkafka classes, this class does not ensure that
|
||||||
|
* it is connected to the upstream broker. Instead, making an action will
|
||||||
|
* validate that.
|
||||||
|
*
|
||||||
|
* @param {object} conf - Key value pairs to configure the admin client
|
||||||
|
* topic configuration
|
||||||
|
* @constructor
|
||||||
|
*/
|
||||||
|
function AdminClient(conf) {
  // Support calling without `new`.
  if (!(this instanceof AdminClient)) {
    return new AdminClient(conf);
  }

  // Copy the configuration so we never mutate the caller's object.
  conf = shallowCopy(conf);

  /**
   * NewTopic model.
   *
   * This is the representation of a new message that is requested to be made
   * using the Admin client.
   *
   * @typedef {object} AdminClient~NewTopic
   * @property {string} topic - the topic name to create
   * @property {number} num_partitions - the number of partitions to give the topic
   * @property {number} replication_factor - the replication factor of the topic
   * @property {object} config - a list of key values to be passed as configuration
   * for the topic.
   */

  // Native admin handle; the connection is established by connect() below.
  this._client = new Kafka.AdminClient(conf);
  this._isConnected = false;
  this.globalConfig = conf;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Connect using the admin client.
|
||||||
|
*
|
||||||
|
* Should be run using the factory method, so should never
|
||||||
|
* need to be called outside.
|
||||||
|
*
|
||||||
|
* Unlike the other connect methods, this one is synchronous.
|
||||||
|
*/
|
||||||
|
AdminClient.prototype.connect = function() {
  // wrap() throws a LibrdKafkaError when the native call reports failure,
  // so the connected flag is only set on success.
  var rc = this._client.connect();
  LibrdKafkaError.wrap(rc, true);
  this._isConnected = true;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Disconnect the admin client.
|
||||||
|
*
|
||||||
|
* This is a synchronous method, but all it does is clean up
|
||||||
|
* some memory and shut some threads down
|
||||||
|
*/
|
||||||
|
AdminClient.prototype.disconnect = function() {
  // Throws via wrap() if the native teardown returns an error code.
  var rc = this._client.disconnect();
  LibrdKafkaError.wrap(rc, true);
  this._isConnected = false;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a topic with a given config.
|
||||||
|
*
|
||||||
|
* @param {NewTopic} topic - Topic to create.
|
||||||
|
* @param {number} timeout - Number of milliseconds to wait while trying to create the topic.
|
||||||
|
* @param {function} cb - The callback to be executed when finished
|
||||||
|
*/
|
||||||
|
AdminClient.prototype.createTopic = function(topic, timeout, cb) {
  if (!this._isConnected) {
    throw new Error('Client is disconnected');
  }

  // Support the (topic, cb) calling form by shifting arguments.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }

  // Default the timeout when omitted or falsy.
  if (!timeout) {
    timeout = 5000;
  }

  this._client.createTopic(topic, timeout, function(err) {
    if (!cb) {
      return;
    }

    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb();
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete a topic.
|
||||||
|
*
|
||||||
|
* @param {string} topic - The topic to delete, by name.
|
||||||
|
* @param {number} timeout - Number of milliseconds to wait while trying to delete the topic.
|
||||||
|
* @param {function} cb - The callback to be executed when finished
|
||||||
|
*/
|
||||||
|
AdminClient.prototype.deleteTopic = function(topic, timeout, cb) {
  if (!this._isConnected) {
    throw new Error('Client is disconnected');
  }

  // Support the (topic, cb) calling form by shifting arguments.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }

  // Default the timeout when omitted or falsy.
  if (!timeout) {
    timeout = 5000;
  }

  this._client.deleteTopic(topic, timeout, function(err) {
    if (!cb) {
      return;
    }

    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb();
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create new partitions for a topic.
|
||||||
|
*
|
||||||
|
* @param {string} topic - The topic to add partitions to, by name.
|
||||||
|
* @param {number} totalPartitions - The total number of partitions the topic should have
|
||||||
|
* after the request
|
||||||
|
* @param {number} timeout - Number of milliseconds to wait while trying to create the partitions.
|
||||||
|
* @param {function} cb - The callback to be executed when finished
|
||||||
|
*/
|
||||||
|
AdminClient.prototype.createPartitions = function(topic, totalPartitions, timeout, cb) {
  if (!this._isConnected) {
    throw new Error('Client is disconnected');
  }

  // Support the (topic, totalPartitions, cb) calling form.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }

  // Default the timeout when omitted or falsy.
  if (!timeout) {
    timeout = 5000;
  }

  this._client.createPartitions(topic, totalPartitions, timeout, function(err) {
    if (!cb) {
      return;
    }

    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb();
    }
  });
};
|
554
lib/client.js
Normal file
554
lib/client.js
Normal file
@ -0,0 +1,554 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
module.exports = Client;

var Emitter = require('events').EventEmitter;
var util = require('util');
var Kafka = require('../librdkafka.js');
var assert = require('assert');

var LibrdKafkaError = require('./error');

// Client is an EventEmitter so consumers/producers can listen for
// 'ready', 'disconnected', 'event.error', etc.
util.inherits(Client, Emitter);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base class for Consumer and Producer
|
||||||
|
*
|
||||||
|
* This should not be created independently, but rather is
|
||||||
|
* the base class on which both producer and consumer
|
||||||
|
* get their common functionality.
|
||||||
|
*
|
||||||
|
* @param {object} globalConf - Global configuration in key value pairs.
|
||||||
|
* @param {function} SubClientType - The function representing the subclient
|
||||||
|
* type. In C++ land this needs to be a class that inherits from Connection.
|
||||||
|
* @param {object} topicConf - Topic configuration in key value pairs
|
||||||
|
* @constructor
|
||||||
|
* @extends Emitter
|
||||||
|
*/
|
||||||
|
function Client(globalConf, SubClientType, topicConf) {
  // Support calling without `new`.
  if (!(this instanceof Client)) {
    return new Client(globalConf, SubClientType, topicConf);
  }

  Emitter.call(this);

  // This superclass must be initialized with the Kafka.{Producer,Consumer}
  // @example var client = new Client({}, Kafka.Producer);
  // remember this is a superclass so this will get taken care of in
  // the producer and consumer main wrappers

  // event_cb === false opts out of forwarding native events below.
  var no_event_cb = globalConf.event_cb === false;
  topicConf = topicConf || {};

  // delete this because librdkafka will complain since this particular
  // key is a real conf value
  // NOTE(review): this mutates the caller's globalConf object — presumably
  // intentional, but consider copying the conf first; confirm with callers.
  delete globalConf.event_cb;

  this._client = new SubClientType(globalConf, topicConf);

  // Pull only the function-valued entries out of a conf object; these are
  // passed to the native layer separately as callbacks.
  var extractFunctions = function(obj) {
    obj = obj || {};
    var obj2 = {};
    for (var p in obj) {
      if (typeof obj[p] === "function") {
        obj2[p] = obj[p];
      }
    }
    return obj2;
  }
  this._cb_configs = {
    global: extractFunctions(globalConf),
    topic: extractFunctions(topicConf),
    event: {},
  }

  if (!no_event_cb) {
    // Forward native events to EventEmitter events. Errors are wrapped as
    // LibrdKafkaError; unknown types fan out to both 'event.event' and
    // 'event.<type>'.
    this._cb_configs.event.event_cb = function(eventType, eventData) {
      switch (eventType) {
        case 'error':
          this.emit('event.error', LibrdKafkaError.create(eventData));
          break;
        case 'stats':
          this.emit('event.stats', eventData);
          break;
        case 'log':
          this.emit('event.log', eventData);
          break;
        default:
          this.emit('event.event', eventData);
          this.emit('event.' + eventType, eventData);
      }
    }.bind(this);
  }

  // Connection bookkeeping used by connect()/disconnect() and metrics.
  this.metrics = {};
  this._isConnected = false;
  this.errorCounter = 0;

  /**
   * Metadata object. Starts out empty but will be filled with information after
   * the initial connect.
   *
   * @type {Client~Metadata}
   */
  this._metadata = {};

  var self = this;

  this.on('ready', function(info) {
    self.metrics.connectionOpened = Date.now();
    self.name = info.name;
  })
  .on('disconnected', function() {
    // reset metrics
    self.metrics = {};
    self._isConnected = false;
    // keep the metadata. it still may be useful
  })
  .on('event.error', function(err) {
    // Track the most recent error and a running count for getLastError().
    self.lastError = err;
    ++self.errorCounter;
  });

}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Connect to the broker and receive its metadata.
|
||||||
|
*
|
||||||
|
* Connects to a broker by establishing the client and fetches its metadata.
|
||||||
|
*
|
||||||
|
* @param {object} metadataOptions - Options to be sent to the metadata.
|
||||||
|
* @param {string} metadataOptions.topic - Topic to fetch metadata for. Empty string is treated as empty.
|
||||||
|
* @param {boolean} metadataOptions.allTopics - Fetch metadata for all topics, not just the ones we know about.
|
||||||
|
* @param {int} metadataOptions.timeout - The timeout, in ms, to allow for fetching metadata. Defaults to 30000ms
|
||||||
|
* @param {Client~connectionCallback} cb - Callback that indicates we are
|
||||||
|
* done connecting.
|
||||||
|
* @return {Client} - Returns itself.
|
||||||
|
*/
|
||||||
|
Client.prototype.connect = function(metadataOptions, cb) {
  var self = this;

  // Single completion point: clears the connecting flag and invokes the
  // user callback exactly once per connect() call.
  var next = function(err, data) {
    self._isConnecting = false;
    if (cb) {
      cb(err, data);
    }
  };

  // Already connected: complete asynchronously with no data.
  if (this._isConnected) {
    setImmediate(next);
    return self;
  }

  // A connect is already in flight: piggyback on its 'ready' event.
  if (this._isConnecting) {
    this.once('ready', function() {
      next(null, this._metadata);
    });
    return self;
  }

  this._isConnecting = true;

  // Failure path. If we had already reached the connected state (e.g.
  // metadata fetch failed after connect), tear the connection down first;
  // callbackCalled + the timer guard ensure next() fires exactly once.
  var fail = function(err) {
    var callbackCalled = false;
    var t;

    if (self._isConnected) {
      self._isConnected = false;
      self._client.disconnect(function() {
        if (callbackCalled) {
          return;
        }
        clearTimeout(t);
        callbackCalled = true;

        next(err); return;
      });

      // don't take too long. this is a failure, after all
      t = setTimeout(function() {
        if (callbackCalled) {
          return;
        }
        callbackCalled = true;

        next(err); return;
      }, 10000).unref();

      self.emit('connection.failure', err, self.metrics);
    } else {

      next(err);
    }
  };

  // Register JS callbacks with the native layer before connecting.
  this._client.configureCallbacks(true, this._cb_configs);

  this._client.connect(function(err, info) {
    if (err) {
      fail(LibrdKafkaError.create(err)); return;
    }

    self._isConnected = true;

    // Otherwise we are successful
    self.getMetadata(metadataOptions || {}, function(err, metadata) {
      if (err) {
        // We are connected so we need to disconnect
        fail(LibrdKafkaError.create(err)); return;
      }

      self._isConnecting = false;
      // We got the metadata otherwise. It is set according to above param
      // Set it here as well so subsequent ready callbacks
      // can check it
      self._isConnected = true;

      /**
       * Ready event. Called when the Client connects successfully
       *
       * @event Client#ready
       * @type {object}
       * @property {string} name - the name of the broker.
       */
      self.emit('ready', info, metadata);
      next(null, metadata); return;

    });

  });

  return self;

};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the native Kafka client.
|
||||||
|
*
|
||||||
|
* You probably shouldn't use this, but if you want to execute methods directly
|
||||||
|
* on the c++ wrapper you can do it here.
|
||||||
|
*
|
||||||
|
* @see connection.cc
|
||||||
|
* @return {Connection} - The native Kafka client.
|
||||||
|
*/
|
||||||
|
Client.prototype.getClient = function() {
  // Escape hatch: hand back the native connection wrapper directly.
  var nativeClient = this._client;
  return nativeClient;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find out how long we have been connected to Kafka.
|
||||||
|
*
|
||||||
|
* @return {number} - Milliseconds since the connection has been established.
|
||||||
|
*/
|
||||||
|
Client.prototype.connectedTime = function() {
  // A disconnected client reports zero uptime.
  if (!this.isConnected()) {
    return 0;
  }
  var elapsedMs = Date.now() - this.metrics.connectionOpened;
  return elapsedMs;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Whether or not we are connected to Kafka.
|
||||||
|
*
|
||||||
|
* @return {boolean} - Whether we are connected.
|
||||||
|
*/
|
||||||
|
Client.prototype.isConnected = function() {
  // Connected only when the flag is set AND the native handle exists.
  return Boolean(this._isConnected && this._client);
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the last error emitted if it exists.
|
||||||
|
*
|
||||||
|
* @return {LibrdKafkaError} - Returns the LibrdKafkaError or null if
|
||||||
|
* one hasn't been thrown.
|
||||||
|
*/
|
||||||
|
Client.prototype.getLastError = function() {
  // Normalize "no error yet" to an explicit null.
  var err = this.lastError;
  return err ? err : null;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Disconnect from the Kafka client.
|
||||||
|
*
|
||||||
|
* This method will disconnect us from Kafka unless we are already in a
|
||||||
|
* disconnecting state. Use this when you're done reading or producing messages
|
||||||
|
* on a given client.
|
||||||
|
*
|
||||||
|
* It will also emit the disconnected event.
|
||||||
|
*
|
||||||
|
* @fires Client#disconnected
|
||||||
|
* @return {function} - Callback to call when disconnection is complete.
|
||||||
|
*/
|
||||||
|
Client.prototype.disconnect = function(cb) {
  var self = this;

  // No-op when a disconnect is already in flight or there is no handle.
  if (!this._isDisconnecting && this._client) {
    this._isDisconnecting = true;
    this._client.disconnect(function() {
      // this take 5000 milliseconds. Librdkafka needs to make sure the memory
      // has been cleaned up before we delete things. @see RdKafka::wait_destroyed
      self._client.configureCallbacks(false, self._cb_configs);

      // Broadcast metrics. Gives people one last chance to do something with them
      self._isDisconnecting = false;
      /**
       * Disconnect event. Called after disconnection is finished.
       *
       * @event Client#disconnected
       * @type {object}
       * @property {date} connectionOpened - when the connection was opened.
       */
      // Copy metrics before emitting; the 'disconnected' handler in the
      // constructor resets self.metrics.
      var metricsCopy = Object.assign({}, self.metrics);
      self.emit('disconnected', metricsCopy);
      if (cb) {
        cb(null, metricsCopy);
      }

    });

  }

  return self;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get client metadata.
|
||||||
|
*
|
||||||
|
* Note: using a <code>metadataOptions.topic</code> parameter has a potential side-effect.
|
||||||
|
* A Topic object will be created, if it did not exist yet, with default options
|
||||||
|
* and it will be cached by librdkafka.
|
||||||
|
*
|
||||||
|
* A subsequent call to create the topic object with specific options (e.g. <code>acks</code>) will return
|
||||||
|
* the previous instance and the specific options will be silently ignored.
|
||||||
|
*
|
||||||
|
* To avoid this side effect, the topic object can be created with the expected options before requesting metadata,
|
||||||
|
* or the metadata request can be performed for all topics (by omitting <code>metadataOptions.topic</code>).
|
||||||
|
*
|
||||||
|
* @param {object} metadataOptions - Metadata options to pass to the client.
|
||||||
|
* @param {string} metadataOptions.topic - Topic string for which to fetch
|
||||||
|
* metadata
|
||||||
|
* @param {number} metadataOptions.timeout - Max time, in ms, to try to fetch
|
||||||
|
* metadata before timing out. Defaults to 3000.
|
||||||
|
* @param {Client~metadataCallback} cb - Callback to fire with the metadata.
|
||||||
|
*/
|
||||||
|
Client.prototype.getMetadata = function(metadataOptions, cb) {
  if (!this.isConnected()) {
    // Fix: guard cb before invoking it. Previously a disconnected client
    // with no callback threw a TypeError here; now this behaves like
    // queryWatermarkOffsets/offsetsForTimes, which also guard cb.
    if (cb) {
      cb(new Error('Client is disconnected'));
    }
    return;
  }

  var self = this;

  this._client.getMetadata(metadataOptions || {}, function(err, metadata) {
    if (err) {
      if (cb) {
        cb(LibrdKafkaError.create(err));
      }
      return;
    }

    // Cache the latest metadata for 'ready' handlers and piggybacked
    // connect() callers.
    self._metadata = metadata;

    if (cb) {
      cb(null, metadata);
    }

  });

};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Query offsets from the broker.
|
||||||
|
*
|
||||||
|
* This function makes a call to the broker to get the current low (oldest/beginning)
|
||||||
|
* and high (newest/end) offsets for a topic partition.
|
||||||
|
*
|
||||||
|
 * @param {string} topic - Topic to receive offsets from.
|
||||||
|
 * @param {number} partition - Partition of the provided topic to receive offsets from
|
||||||
|
 * @param {number} timeout - Number of ms to wait to receive a response.
|
||||||
|
* @param {Client~watermarkOffsetsCallback} cb - Callback to fire with the offsets.
|
||||||
|
*/
|
||||||
|
Client.prototype.queryWatermarkOffsets = function(topic, partition, timeout, cb) {
  if (!this.isConnected()) {
    if (cb) {
      return cb(new Error('Client is disconnected'));
    }
    return;
  }

  // Support the (topic, partition, cb) calling form.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 1000;
  }

  // Default the timeout when omitted or falsy.
  if (!timeout) {
    timeout = 1000;
  }

  this._client.queryWatermarkOffsets(topic, partition, timeout, function(err, offsets) {
    if (!cb) {
      return;
    }

    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(null, offsets);
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Query offsets for times from the broker.
|
||||||
|
*
|
||||||
|
* This function makes a call to the broker to get the offsets for times specified.
|
||||||
|
*
|
||||||
|
* @param {TopicPartition[]} toppars - Array of topic partitions. The offset in these
|
||||||
|
* should instead refer to a timestamp you want
|
||||||
|
* offsets for
|
||||||
|
 * @param {number} timeout - Number of ms to wait to receive a response.
|
||||||
|
* @param {Client~offsetsForTimesCallback} cb - Callback to fire with the filled in offsets.
|
||||||
|
*/
|
||||||
|
Client.prototype.offsetsForTimes = function(toppars, timeout, cb) {
  if (!this.isConnected()) {
    if (cb) {
      return cb(new Error('Client is disconnected'));
    }
    return;
  }

  // Support the (toppars, cb) calling form.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 1000;
  }

  // Default the timeout when omitted or falsy.
  if (!timeout) {
    timeout = 1000;
  }

  this._client.offsetsForTimes(toppars, timeout, function(err, filledToppars) {
    if (!cb) {
      return;
    }

    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(null, filledToppars);
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Wrap a potential RdKafka error.
|
||||||
|
*
|
||||||
|
* This internal method is meant to take a return value
|
||||||
|
* from a function that returns an RdKafka error code, throw if it
|
||||||
|
* is an error (Making it a proper librdkafka error object), or
|
||||||
|
* return the appropriate value otherwise.
|
||||||
|
*
|
||||||
|
* It is intended to be used in a return statement,
|
||||||
|
*
|
||||||
|
* @private
|
||||||
|
* @param {number} errorCode - Error code returned from a native method
|
||||||
|
* @param {bool} intIsError - If specified true, any non-number return type will be classified as a success
|
||||||
|
* @return {boolean} - Returns true or the method return value unless it throws.
|
||||||
|
*/
|
||||||
|
Client.prototype._errorWrap = function(errorCode, intIsError) {
  var result = true;
  if (intIsError) {
    // The raw return value is passed through on success; any non-number
    // return is treated as "no error" for the check below.
    result = errorCode;
    errorCode = (typeof errorCode === 'number') ? errorCode : 0;
  }

  // Throw a proper LibrdKafkaError for any real error code.
  if (errorCode !== LibrdKafkaError.codes.ERR_NO_ERROR) {
    throw LibrdKafkaError.create(errorCode);
  }

  return result;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This callback is used to pass metadata or an error after a successful
|
||||||
|
* connection
|
||||||
|
*
|
||||||
|
* @callback Client~connectionCallback
|
||||||
|
* @param {Error} err - An error, if one occurred while connecting.
|
||||||
|
* @param {Client~Metadata} metadata - Metadata object.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This callback is used to pass offsets or an error after a successful
|
||||||
|
* query
|
||||||
|
*
|
||||||
|
* @callback Client~watermarkOffsetsCallback
|
||||||
|
* @param {Error} err - An error, if one occurred while connecting.
|
||||||
|
* @param {Client~watermarkOffsets} offsets - Watermark offsets
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This callback is used to pass toppars or an error after a successful
|
||||||
|
* times query
|
||||||
|
*
|
||||||
|
* @callback Client~offsetsForTimesCallback
|
||||||
|
* @param {Error} err - An error, if one occurred while connecting.
|
||||||
|
* @param {TopicPartition[]} toppars - Topic partitions with offsets filled in
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @typedef {object} Client~watermarkOffsets
|
||||||
|
* @property {number} high - High (newest/end) offset
|
||||||
|
* @property {number} low - Low (oldest/beginning) offset
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @typedef {object} Client~MetadataBroker
|
||||||
|
* @property {number} id - Broker ID
|
||||||
|
* @property {string} host - Broker host
|
||||||
|
* @property {number} port - Broker port.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @typedef {object} Client~MetadataTopic
|
||||||
|
* @property {string} name - Topic name
|
||||||
|
* @property {Client~MetadataPartition[]} partitions - Array of partitions
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @typedef {object} Client~MetadataPartition
|
||||||
|
* @property {number} id - Partition id
|
||||||
|
* @property {number} leader - Broker ID for the partition leader
|
||||||
|
* @property {number[]} replicas - Array of replica IDs
|
||||||
|
 * @property {number[]} isrs - Array of ISR ids
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Metadata object.
|
||||||
|
*
|
||||||
|
* This is the representation of Kafka metadata in JavaScript.
|
||||||
|
*
|
||||||
|
* @typedef {object} Client~Metadata
|
||||||
|
* @property {number} orig_broker_id - The broker ID of the original bootstrap
|
||||||
|
* broker.
|
||||||
|
* @property {string} orig_broker_name - The name of the original bootstrap
|
||||||
|
* broker.
|
||||||
|
* @property {Client~MetadataBroker[]} brokers - An array of broker objects
|
||||||
|
* @property {Client~MetadataTopic[]} topics - An array of topics.
|
||||||
|
*/
|
470
lib/error.js
Normal file
470
lib/error.js
Normal file
@ -0,0 +1,470 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// The LibrdKafkaError function declaration below is hoisted, so exporting
// it before its definition is safe.
module.exports = LibrdKafkaError;

var util = require('util');
// Native binding; provides err2str() for code -> human-readable message lookups.
var librdkafka = require('../librdkafka');

// Make LibrdKafkaError a proper Error subclass (prototype chain, stack capture).
util.inherits(LibrdKafkaError, Error);

// Static helpers (both functions are defined at the bottom of this file).
LibrdKafkaError.create = createLibrdkafkaError;
LibrdKafkaError.wrap = errorWrap;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enum for identifying errors reported by the library
|
||||||
|
*
|
||||||
|
* You can find this list in the C++ code at
|
||||||
|
* https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L148
|
||||||
|
*
|
||||||
|
* @readonly
|
||||||
|
* @enum {number}
|
||||||
|
* @constant
|
||||||
|
*/
|
||||||
|
// ====== Generated from librdkafka 1.6.1 file src-cpp/rdkafkacpp.h ======
|
||||||
|
LibrdKafkaError.codes = {
|
||||||
|
|
||||||
|
/* Internal errors to rdkafka: */
|
||||||
|
/** Begin internal error codes */
|
||||||
|
ERR__BEGIN: -200,
|
||||||
|
/** Received message is incorrect */
|
||||||
|
ERR__BAD_MSG: -199,
|
||||||
|
/** Bad/unknown compression */
|
||||||
|
ERR__BAD_COMPRESSION: -198,
|
||||||
|
/** Broker is going away */
|
||||||
|
ERR__DESTROY: -197,
|
||||||
|
/** Generic failure */
|
||||||
|
ERR__FAIL: -196,
|
||||||
|
/** Broker transport failure */
|
||||||
|
ERR__TRANSPORT: -195,
|
||||||
|
/** Critical system resource */
|
||||||
|
ERR__CRIT_SYS_RESOURCE: -194,
|
||||||
|
/** Failed to resolve broker */
|
||||||
|
ERR__RESOLVE: -193,
|
||||||
|
/** Produced message timed out*/
|
||||||
|
ERR__MSG_TIMED_OUT: -192,
|
||||||
|
/** Reached the end of the topic+partition queue on
|
||||||
|
* the broker. Not really an error.
|
||||||
|
* This event is disabled by default,
|
||||||
|
* see the `enable.partition.eof` configuration property. */
|
||||||
|
ERR__PARTITION_EOF: -191,
|
||||||
|
/** Permanent: Partition does not exist in cluster. */
|
||||||
|
ERR__UNKNOWN_PARTITION: -190,
|
||||||
|
/** File or filesystem error */
|
||||||
|
ERR__FS: -189,
|
||||||
|
/** Permanent: Topic does not exist in cluster. */
|
||||||
|
ERR__UNKNOWN_TOPIC: -188,
|
||||||
|
/** All broker connections are down. */
|
||||||
|
ERR__ALL_BROKERS_DOWN: -187,
|
||||||
|
/** Invalid argument, or invalid configuration */
|
||||||
|
ERR__INVALID_ARG: -186,
|
||||||
|
/** Operation timed out */
|
||||||
|
ERR__TIMED_OUT: -185,
|
||||||
|
/** Queue is full */
|
||||||
|
ERR__QUEUE_FULL: -184,
|
||||||
|
/** ISR count < required.acks */
|
||||||
|
ERR__ISR_INSUFF: -183,
|
||||||
|
/** Broker node update */
|
||||||
|
ERR__NODE_UPDATE: -182,
|
||||||
|
/** SSL error */
|
||||||
|
ERR__SSL: -181,
|
||||||
|
/** Waiting for coordinator to become available. */
|
||||||
|
ERR__WAIT_COORD: -180,
|
||||||
|
/** Unknown client group */
|
||||||
|
ERR__UNKNOWN_GROUP: -179,
|
||||||
|
/** Operation in progress */
|
||||||
|
ERR__IN_PROGRESS: -178,
|
||||||
|
/** Previous operation in progress, wait for it to finish. */
|
||||||
|
ERR__PREV_IN_PROGRESS: -177,
|
||||||
|
/** This operation would interfere with an existing subscription */
|
||||||
|
ERR__EXISTING_SUBSCRIPTION: -176,
|
||||||
|
/** Assigned partitions (rebalance_cb) */
|
||||||
|
ERR__ASSIGN_PARTITIONS: -175,
|
||||||
|
/** Revoked partitions (rebalance_cb) */
|
||||||
|
ERR__REVOKE_PARTITIONS: -174,
|
||||||
|
/** Conflicting use */
|
||||||
|
ERR__CONFLICT: -173,
|
||||||
|
/** Wrong state */
|
||||||
|
ERR__STATE: -172,
|
||||||
|
/** Unknown protocol */
|
||||||
|
ERR__UNKNOWN_PROTOCOL: -171,
|
||||||
|
/** Not implemented */
|
||||||
|
ERR__NOT_IMPLEMENTED: -170,
|
||||||
|
/** Authentication failure*/
|
||||||
|
ERR__AUTHENTICATION: -169,
|
||||||
|
/** No stored offset */
|
||||||
|
ERR__NO_OFFSET: -168,
|
||||||
|
/** Outdated */
|
||||||
|
ERR__OUTDATED: -167,
|
||||||
|
/** Timed out in queue */
|
||||||
|
ERR__TIMED_OUT_QUEUE: -166,
|
||||||
|
/** Feature not supported by broker */
|
||||||
|
ERR__UNSUPPORTED_FEATURE: -165,
|
||||||
|
/** Awaiting cache update */
|
||||||
|
ERR__WAIT_CACHE: -164,
|
||||||
|
/** Operation interrupted */
|
||||||
|
ERR__INTR: -163,
|
||||||
|
/** Key serialization error */
|
||||||
|
ERR__KEY_SERIALIZATION: -162,
|
||||||
|
/** Value serialization error */
|
||||||
|
ERR__VALUE_SERIALIZATION: -161,
|
||||||
|
/** Key deserialization error */
|
||||||
|
ERR__KEY_DESERIALIZATION: -160,
|
||||||
|
/** Value deserialization error */
|
||||||
|
ERR__VALUE_DESERIALIZATION: -159,
|
||||||
|
/** Partial response */
|
||||||
|
ERR__PARTIAL: -158,
|
||||||
|
/** Modification attempted on read-only object */
|
||||||
|
ERR__READ_ONLY: -157,
|
||||||
|
/** No such entry / item not found */
|
||||||
|
ERR__NOENT: -156,
|
||||||
|
/** Read underflow */
|
||||||
|
ERR__UNDERFLOW: -155,
|
||||||
|
/** Invalid type */
|
||||||
|
ERR__INVALID_TYPE: -154,
|
||||||
|
/** Retry operation */
|
||||||
|
ERR__RETRY: -153,
|
||||||
|
/** Purged in queue */
|
||||||
|
ERR__PURGE_QUEUE: -152,
|
||||||
|
/** Purged in flight */
|
||||||
|
ERR__PURGE_INFLIGHT: -151,
|
||||||
|
/** Fatal error: see RdKafka::Handle::fatal_error() */
|
||||||
|
ERR__FATAL: -150,
|
||||||
|
/** Inconsistent state */
|
||||||
|
ERR__INCONSISTENT: -149,
|
||||||
|
/** Gap-less ordering would not be guaranteed if proceeding */
|
||||||
|
ERR__GAPLESS_GUARANTEE: -148,
|
||||||
|
/** Maximum poll interval exceeded */
|
||||||
|
ERR__MAX_POLL_EXCEEDED: -147,
|
||||||
|
/** Unknown broker */
|
||||||
|
ERR__UNKNOWN_BROKER: -146,
|
||||||
|
/** Functionality not configured */
|
||||||
|
ERR__NOT_CONFIGURED: -145,
|
||||||
|
/** Instance has been fenced */
|
||||||
|
ERR__FENCED: -144,
|
||||||
|
/** Application generated error */
|
||||||
|
ERR__APPLICATION: -143,
|
||||||
|
/** Assignment lost */
|
||||||
|
ERR__ASSIGNMENT_LOST: -142,
|
||||||
|
/** No operation performed */
|
||||||
|
ERR__NOOP: -141,
|
||||||
|
/** No offset to automatically reset to */
|
||||||
|
ERR__AUTO_OFFSET_RESET: -140,
|
||||||
|
/** End internal error codes */
|
||||||
|
ERR__END: -100,
|
||||||
|
/* Kafka broker errors: */
|
||||||
|
/** Unknown broker error */
|
||||||
|
ERR_UNKNOWN: -1,
|
||||||
|
/** Success */
|
||||||
|
ERR_NO_ERROR: 0,
|
||||||
|
/** Offset out of range */
|
||||||
|
ERR_OFFSET_OUT_OF_RANGE: 1,
|
||||||
|
/** Invalid message */
|
||||||
|
ERR_INVALID_MSG: 2,
|
||||||
|
/** Unknown topic or partition */
|
||||||
|
ERR_UNKNOWN_TOPIC_OR_PART: 3,
|
||||||
|
/** Invalid message size */
|
||||||
|
ERR_INVALID_MSG_SIZE: 4,
|
||||||
|
/** Leader not available */
|
||||||
|
ERR_LEADER_NOT_AVAILABLE: 5,
|
||||||
|
/** Not leader for partition */
|
||||||
|
ERR_NOT_LEADER_FOR_PARTITION: 6,
|
||||||
|
/** Request timed out */
|
||||||
|
ERR_REQUEST_TIMED_OUT: 7,
|
||||||
|
/** Broker not available */
|
||||||
|
ERR_BROKER_NOT_AVAILABLE: 8,
|
||||||
|
/** Replica not available */
|
||||||
|
ERR_REPLICA_NOT_AVAILABLE: 9,
|
||||||
|
/** Message size too large */
|
||||||
|
ERR_MSG_SIZE_TOO_LARGE: 10,
|
||||||
|
/** StaleControllerEpochCode */
|
||||||
|
ERR_STALE_CTRL_EPOCH: 11,
|
||||||
|
/** Offset metadata string too large */
|
||||||
|
ERR_OFFSET_METADATA_TOO_LARGE: 12,
|
||||||
|
/** Broker disconnected before response received */
|
||||||
|
ERR_NETWORK_EXCEPTION: 13,
|
||||||
|
/** Coordinator load in progress */
|
||||||
|
ERR_COORDINATOR_LOAD_IN_PROGRESS: 14,
|
||||||
|
/** Group coordinator load in progress */
|
||||||
|
ERR_GROUP_LOAD_IN_PROGRESS: 14,
|
||||||
|
/** Coordinator not available */
|
||||||
|
ERR_COORDINATOR_NOT_AVAILABLE: 15,
|
||||||
|
/** Group coordinator not available */
|
||||||
|
ERR_GROUP_COORDINATOR_NOT_AVAILABLE: 15,
|
||||||
|
/** Not coordinator */
|
||||||
|
ERR_NOT_COORDINATOR: 16,
|
||||||
|
/** Not coordinator for group */
|
||||||
|
ERR_NOT_COORDINATOR_FOR_GROUP: 16,
|
||||||
|
/** Invalid topic */
|
||||||
|
ERR_TOPIC_EXCEPTION: 17,
|
||||||
|
/** Message batch larger than configured server segment size */
|
||||||
|
ERR_RECORD_LIST_TOO_LARGE: 18,
|
||||||
|
/** Not enough in-sync replicas */
|
||||||
|
ERR_NOT_ENOUGH_REPLICAS: 19,
|
||||||
|
/** Message(s) written to insufficient number of in-sync replicas */
|
||||||
|
ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND: 20,
|
||||||
|
/** Invalid required acks value */
|
||||||
|
ERR_INVALID_REQUIRED_ACKS: 21,
|
||||||
|
/** Specified group generation id is not valid */
|
||||||
|
ERR_ILLEGAL_GENERATION: 22,
|
||||||
|
/** Inconsistent group protocol */
|
||||||
|
ERR_INCONSISTENT_GROUP_PROTOCOL: 23,
|
||||||
|
/** Invalid group.id */
|
||||||
|
ERR_INVALID_GROUP_ID: 24,
|
||||||
|
/** Unknown member */
|
||||||
|
ERR_UNKNOWN_MEMBER_ID: 25,
|
||||||
|
/** Invalid session timeout */
|
||||||
|
ERR_INVALID_SESSION_TIMEOUT: 26,
|
||||||
|
/** Group rebalance in progress */
|
||||||
|
ERR_REBALANCE_IN_PROGRESS: 27,
|
||||||
|
/** Commit offset data size is not valid */
|
||||||
|
ERR_INVALID_COMMIT_OFFSET_SIZE: 28,
|
||||||
|
/** Topic authorization failed */
|
||||||
|
ERR_TOPIC_AUTHORIZATION_FAILED: 29,
|
||||||
|
/** Group authorization failed */
|
||||||
|
ERR_GROUP_AUTHORIZATION_FAILED: 30,
|
||||||
|
/** Cluster authorization failed */
|
||||||
|
ERR_CLUSTER_AUTHORIZATION_FAILED: 31,
|
||||||
|
/** Invalid timestamp */
|
||||||
|
ERR_INVALID_TIMESTAMP: 32,
|
||||||
|
/** Unsupported SASL mechanism */
|
||||||
|
ERR_UNSUPPORTED_SASL_MECHANISM: 33,
|
||||||
|
/** Illegal SASL state */
|
||||||
|
ERR_ILLEGAL_SASL_STATE: 34,
|
||||||
|
  /** Unsupported version */
|
||||||
|
ERR_UNSUPPORTED_VERSION: 35,
|
||||||
|
/** Topic already exists */
|
||||||
|
ERR_TOPIC_ALREADY_EXISTS: 36,
|
||||||
|
/** Invalid number of partitions */
|
||||||
|
ERR_INVALID_PARTITIONS: 37,
|
||||||
|
/** Invalid replication factor */
|
||||||
|
ERR_INVALID_REPLICATION_FACTOR: 38,
|
||||||
|
/** Invalid replica assignment */
|
||||||
|
ERR_INVALID_REPLICA_ASSIGNMENT: 39,
|
||||||
|
/** Invalid config */
|
||||||
|
ERR_INVALID_CONFIG: 40,
|
||||||
|
/** Not controller for cluster */
|
||||||
|
ERR_NOT_CONTROLLER: 41,
|
||||||
|
/** Invalid request */
|
||||||
|
ERR_INVALID_REQUEST: 42,
|
||||||
|
/** Message format on broker does not support request */
|
||||||
|
ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT: 43,
|
||||||
|
/** Policy violation */
|
||||||
|
ERR_POLICY_VIOLATION: 44,
|
||||||
|
/** Broker received an out of order sequence number */
|
||||||
|
ERR_OUT_OF_ORDER_SEQUENCE_NUMBER: 45,
|
||||||
|
/** Broker received a duplicate sequence number */
|
||||||
|
ERR_DUPLICATE_SEQUENCE_NUMBER: 46,
|
||||||
|
/** Producer attempted an operation with an old epoch */
|
||||||
|
ERR_INVALID_PRODUCER_EPOCH: 47,
|
||||||
|
/** Producer attempted a transactional operation in an invalid state */
|
||||||
|
ERR_INVALID_TXN_STATE: 48,
|
||||||
|
/** Producer attempted to use a producer id which is not
|
||||||
|
* currently assigned to its transactional id */
|
||||||
|
ERR_INVALID_PRODUCER_ID_MAPPING: 49,
|
||||||
|
/** Transaction timeout is larger than the maximum
|
||||||
|
* value allowed by the broker's max.transaction.timeout.ms */
|
||||||
|
ERR_INVALID_TRANSACTION_TIMEOUT: 50,
|
||||||
|
/** Producer attempted to update a transaction while another
|
||||||
|
* concurrent operation on the same transaction was ongoing */
|
||||||
|
ERR_CONCURRENT_TRANSACTIONS: 51,
|
||||||
|
/** Indicates that the transaction coordinator sending a
|
||||||
|
* WriteTxnMarker is no longer the current coordinator for a
|
||||||
|
* given producer */
|
||||||
|
ERR_TRANSACTION_COORDINATOR_FENCED: 52,
|
||||||
|
/** Transactional Id authorization failed */
|
||||||
|
ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: 53,
|
||||||
|
/** Security features are disabled */
|
||||||
|
ERR_SECURITY_DISABLED: 54,
|
||||||
|
/** Operation not attempted */
|
||||||
|
ERR_OPERATION_NOT_ATTEMPTED: 55,
|
||||||
|
/** Disk error when trying to access log file on the disk */
|
||||||
|
ERR_KAFKA_STORAGE_ERROR: 56,
|
||||||
|
/** The user-specified log directory is not found in the broker config */
|
||||||
|
ERR_LOG_DIR_NOT_FOUND: 57,
|
||||||
|
/** SASL Authentication failed */
|
||||||
|
ERR_SASL_AUTHENTICATION_FAILED: 58,
|
||||||
|
/** Unknown Producer Id */
|
||||||
|
ERR_UNKNOWN_PRODUCER_ID: 59,
|
||||||
|
/** Partition reassignment is in progress */
|
||||||
|
ERR_REASSIGNMENT_IN_PROGRESS: 60,
|
||||||
|
/** Delegation Token feature is not enabled */
|
||||||
|
ERR_DELEGATION_TOKEN_AUTH_DISABLED: 61,
|
||||||
|
/** Delegation Token is not found on server */
|
||||||
|
ERR_DELEGATION_TOKEN_NOT_FOUND: 62,
|
||||||
|
/** Specified Principal is not valid Owner/Renewer */
|
||||||
|
ERR_DELEGATION_TOKEN_OWNER_MISMATCH: 63,
|
||||||
|
/** Delegation Token requests are not allowed on this connection */
|
||||||
|
ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED: 64,
|
||||||
|
/** Delegation Token authorization failed */
|
||||||
|
ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED: 65,
|
||||||
|
/** Delegation Token is expired */
|
||||||
|
ERR_DELEGATION_TOKEN_EXPIRED: 66,
|
||||||
|
/** Supplied principalType is not supported */
|
||||||
|
ERR_INVALID_PRINCIPAL_TYPE: 67,
|
||||||
|
/** The group is not empty */
|
||||||
|
ERR_NON_EMPTY_GROUP: 68,
|
||||||
|
/** The group id does not exist */
|
||||||
|
ERR_GROUP_ID_NOT_FOUND: 69,
|
||||||
|
/** The fetch session ID was not found */
|
||||||
|
ERR_FETCH_SESSION_ID_NOT_FOUND: 70,
|
||||||
|
/** The fetch session epoch is invalid */
|
||||||
|
ERR_INVALID_FETCH_SESSION_EPOCH: 71,
|
||||||
|
/** No matching listener */
|
||||||
|
ERR_LISTENER_NOT_FOUND: 72,
|
||||||
|
/** Topic deletion is disabled */
|
||||||
|
ERR_TOPIC_DELETION_DISABLED: 73,
|
||||||
|
/** Leader epoch is older than broker epoch */
|
||||||
|
ERR_FENCED_LEADER_EPOCH: 74,
|
||||||
|
/** Leader epoch is newer than broker epoch */
|
||||||
|
ERR_UNKNOWN_LEADER_EPOCH: 75,
|
||||||
|
/** Unsupported compression type */
|
||||||
|
ERR_UNSUPPORTED_COMPRESSION_TYPE: 76,
|
||||||
|
/** Broker epoch has changed */
|
||||||
|
ERR_STALE_BROKER_EPOCH: 77,
|
||||||
|
/** Leader high watermark is not caught up */
|
||||||
|
ERR_OFFSET_NOT_AVAILABLE: 78,
|
||||||
|
/** Group member needs a valid member ID */
|
||||||
|
ERR_MEMBER_ID_REQUIRED: 79,
|
||||||
|
/** Preferred leader was not available */
|
||||||
|
ERR_PREFERRED_LEADER_NOT_AVAILABLE: 80,
|
||||||
|
/** Consumer group has reached maximum size */
|
||||||
|
ERR_GROUP_MAX_SIZE_REACHED: 81,
|
||||||
|
/** Static consumer fenced by other consumer with same
|
||||||
|
* group.instance.id. */
|
||||||
|
ERR_FENCED_INSTANCE_ID: 82,
|
||||||
|
/** Eligible partition leaders are not available */
|
||||||
|
ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE: 83,
|
||||||
|
/** Leader election not needed for topic partition */
|
||||||
|
ERR_ELECTION_NOT_NEEDED: 84,
|
||||||
|
/** No partition reassignment is in progress */
|
||||||
|
ERR_NO_REASSIGNMENT_IN_PROGRESS: 85,
|
||||||
|
/** Deleting offsets of a topic while the consumer group is
|
||||||
|
* subscribed to it */
|
||||||
|
ERR_GROUP_SUBSCRIBED_TO_TOPIC: 86,
|
||||||
|
/** Broker failed to validate record */
|
||||||
|
ERR_INVALID_RECORD: 87,
|
||||||
|
/** There are unstable offsets that need to be cleared */
|
||||||
|
ERR_UNSTABLE_OFFSET_COMMIT: 88,
|
||||||
|
/** Throttling quota has been exceeded */
|
||||||
|
ERR_THROTTLING_QUOTA_EXCEEDED: 89,
|
||||||
|
/** There is a newer producer with the same transactionalId
|
||||||
|
* which fences the current one */
|
||||||
|
ERR_PRODUCER_FENCED: 90,
|
||||||
|
/** Request illegally referred to resource that does not exist */
|
||||||
|
ERR_RESOURCE_NOT_FOUND: 91,
|
||||||
|
/** Request illegally referred to the same resource twice */
|
||||||
|
ERR_DUPLICATE_RESOURCE: 92,
|
||||||
|
/** Requested credential would not meet criteria for acceptability */
|
||||||
|
ERR_UNACCEPTABLE_CREDENTIAL: 93,
|
||||||
|
/** Indicates that the either the sender or recipient of a
|
||||||
|
* voter-only request is not one of the expected voters */
|
||||||
|
ERR_INCONSISTENT_VOTER_SET: 94,
|
||||||
|
/** Invalid update version */
|
||||||
|
ERR_INVALID_UPDATE_VERSION: 95,
|
||||||
|
/** Unable to update finalized features due to server error */
|
||||||
|
ERR_FEATURE_UPDATE_FAILED: 96,
|
||||||
|
/** Request principal deserialization failed during forwarding */
|
||||||
|
ERR_PRINCIPAL_DESERIALIZATION_FAILURE: 97
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
 * Representation of a librdkafka error
 *
 * This can be created by giving either another error
 * to piggy-back on. In this situation it tries to parse
 * the error string to figure out the intent. However, more usually,
 * it is constructed by an error object created by a C++ Baton.
 *
 * @param {object|error|number} e - A raw librdkafka error code (number), a
 * plain baton-style object with `message`/`code` fields, or an Error to wrap.
 * @property {string} message - The error message
 * @property {number} code - The error code.
 * @property {number} errno - Alias of `code`, kept for errno-style consumers.
 * @property {string} origin - The origin, whether it is local or remote
 * @constructor
 */
function LibrdKafkaError(e) {
  // Guard against being called without `new`.
  if (!(this instanceof LibrdKafkaError)) {
    return new LibrdKafkaError(e);
  }

  if (typeof e === 'number') {
    // Raw librdkafka error code: resolve the human-readable message through
    // the native binding.
    this.message = librdkafka.err2str(e);
    this.code = e;
    this.errno = e;
    // NOTE(review): internal (client-side) codes live in [ERR__BEGIN, ERR__END]
    // = [-200, -100], while broker codes are >= -1. As written, this labels
    // broker codes 'local' and internal codes 'kafka' — confirm the intended
    // direction before relying on `origin`.
    if (e >= LibrdKafkaError.codes.ERR__END) {
      this.origin = 'local';
    } else {
      this.origin = 'kafka';
    }
    Error.captureStackTrace(this, this.constructor);
  } else if (!util.isError(e)) {
    // Plain object produced by a C++ Baton ({ message, code, ... }).
    // This is the better way
    this.message = e.message;
    this.code = e.code;
    this.errno = e.code;
    // Same origin classification as the numeric branch (see note above).
    if (e.code >= LibrdKafkaError.codes.ERR__END) {
      this.origin = 'local';
    } else {
      this.origin = 'kafka';
    }
    Error.captureStackTrace(this, this.constructor);
  } else {
    // Wrapping an existing Error: try to recover origin and intent from the
    // "origin: message" convention used in the error string.
    var message = e.message;
    var parsedMessage = message.split(': ');

    var origin, msg;

    if (parsedMessage.length > 1) {
      origin = parsedMessage[0].toLowerCase();
      msg = parsedMessage[1].toLowerCase();
    } else {
      // No "origin:" prefix present.
      origin = 'unknown';
      msg = message.toLowerCase();
    }

    // special cases
    if (msg === 'consumer is disconnected' || msg === 'producer is disconnected') {
      // Disconnected client: normalize to the local ERR__STATE code.
      this.origin = 'local';
      this.code = LibrdKafkaError.codes.ERR__STATE;
      this.errno = this.code;
      this.message = msg;
    } else {
      this.origin = origin;
      this.message = msg;
      // Preserve a numeric code when present; -1 (ERR_UNKNOWN) otherwise.
      this.code = typeof e.code === 'number' ? e.code : -1;
      this.errno = this.code;
      // Keep the original stack so the trace points at the real failure site.
      this.stack = e.stack;
    }

  }

  // Fatal/transactional flags are propagated only when explicitly present
  // (own properties) on the source value.
  if (e.hasOwnProperty('isFatal')) this.isFatal = e.isFatal;
  if (e.hasOwnProperty('isRetriable')) this.isRetriable = e.isRetriable;
  if (e.hasOwnProperty('isTxnRequiresAbort')) this.isTxnRequiresAbort = e.isTxnRequiresAbort;

}
|
||||||
|
|
||||||
|
/**
 * Factory helper that builds a LibrdKafkaError from a raw code, baton-style
 * object, or Error. Exposed as `LibrdKafkaError.create`.
 *
 * @param {object|error|number} e - The value to wrap.
 * @returns {LibrdKafkaError} The wrapped error.
 */
function createLibrdkafkaError(e) {
  return new LibrdKafkaError(e);
}
|
||||||
|
|
||||||
|
/**
 * Normalize a librdkafka return value, throwing when it signals an error.
 * Exposed as `LibrdKafkaError.wrap`.
 *
 * @param {*} errorCode - Raw return value from a native call. Treated as an
 * error code directly, unless `intIsError` is set.
 * @param {boolean} intIsError - When truthy, `errorCode` doubles as the
 * success payload: it is returned to the caller, and only a numeric value is
 * interpreted as an error code (anything else counts as "no error").
 * @returns {*} `true`, or the original value when `intIsError` is set.
 * @throws {LibrdKafkaError} When the (numeric) code is not ERR_NO_ERROR.
 */
function errorWrap(errorCode, intIsError) {
  var result = true;

  if (intIsError) {
    // The raw value is also the payload we hand back on success.
    result = errorCode;
    errorCode = (typeof errorCode === 'number') ? errorCode : 0;
  }

  if (errorCode === LibrdKafkaError.codes.ERR_NO_ERROR) {
    return result;
  }

  throw LibrdKafkaError.create(errorCode);
}
|
34
lib/index.js
Normal file
34
lib/index.js
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Client implementations.
var KafkaConsumer = require('./kafka-consumer');
var Producer = require('./producer');
var HighLevelProducer = require('./producer/high-level-producer');
// Shared error codes and wrapping helpers.
var error = require('./error');
var util = require('util');
// Native librdkafka binding.
var lib = require('../librdkafka');
var Topic = require('./topic');
var Admin = require('./admin');
// The native layer reports compiled-in capabilities as a comma-separated string.
var features = lib.features().split(',');

module.exports = {
  // Legacy alias; emits a deprecation warning on first use.
  Consumer: util.deprecate(KafkaConsumer, 'Use KafkaConsumer instead. This may be changed in a later version'),
  Producer: Producer,
  HighLevelProducer: HighLevelProducer,
  AdminClient: Admin,
  KafkaConsumer: KafkaConsumer,
  // Streaming facades over the consumer/producer clients.
  createReadStream: KafkaConsumer.createReadStream,
  createWriteStream: Producer.createWriteStream,
  CODES: {
    ERRORS: error.codes,
  },
  Topic: Topic,
  // e.g. ['gzip', 'snappy', 'ssl', 'sasl', ...]
  features: features,
  librdkafkaVersion: lib.librdkafkaVersion
};
|
381
lib/kafka-consumer-stream.js
Normal file
381
lib/kafka-consumer-stream.js
Normal file
@ -0,0 +1,381 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
'use strict';
|
||||||
|
|
||||||
|
module.exports = KafkaConsumerStream;
|
||||||
|
|
||||||
|
var Readable = require('stream').Readable;
|
||||||
|
var util = require('util');
|
||||||
|
|
||||||
|
util.inherits(KafkaConsumerStream, Readable);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ReadableStream integrating with the Kafka Consumer.
|
||||||
|
*
|
||||||
|
* This class is used to read data off of Kafka in a streaming way. It is
|
||||||
|
* useful if you'd like to have a way to pipe Kafka into other systems. You
|
||||||
|
* should generally not make this class yourself, as it is not even exposed
|
||||||
|
* as part of module.exports. Instead, you should KafkaConsumer.createReadStream.
|
||||||
|
*
|
||||||
|
* The stream implementation is slower than the continuous subscribe callback.
|
||||||
|
* If you don't care so much about backpressure and would rather squeeze
|
||||||
|
* out performance, use that method. Using the stream will ensure you read only
|
||||||
|
* as fast as you write.
|
||||||
|
*
|
||||||
|
* The stream detects if Kafka is already connected. If it is, it will begin
|
||||||
|
* reading. If it is not, it will connect and read when it is ready.
|
||||||
|
*
|
||||||
|
* This stream operates in objectMode. It streams {Consumer~Message}
|
||||||
|
*
|
||||||
|
* @param {Consumer} consumer - The Kafka Consumer object.
|
||||||
|
* @param {object} options - Options to configure the stream.
|
||||||
|
* @param {number} options.waitInterval - Number of ms to wait if Kafka reports
|
||||||
|
* that it has timed out or that we are out of messages (right now).
|
||||||
|
* @param {array} options.topics - Array of topics, or a function that parses
|
||||||
|
* metadata into an array of topics
|
||||||
|
* @constructor
|
||||||
|
* @extends stream.Readable
|
||||||
|
* @see Consumer~Message
|
||||||
|
*/
|
||||||
|
function KafkaConsumerStream(consumer, options) {
  // Guard against being called without `new`.
  if (!(this instanceof KafkaConsumerStream)) {
    return new KafkaConsumerStream(consumer, options);
  }

  // Normalize `options`: omitted -> defaults; a bare number -> waitInterval.
  if (options === undefined) {
    options = { waitInterval: 1000 };
  } else if (typeof options === 'number') {
    options = { waitInterval: options };
  } else if (options === null || typeof options !== 'object') {
    throw new TypeError('"options" argument must be a number or an object');
  }

  var topics = options.topics;

  if (typeof topics === 'function') {
    // Just ignore the rest of the checks here
    // (the function is invoked later, with metadata, to produce the topic list).
  } else if (!Array.isArray(topics)) {
    if (typeof topics !== 'string' && !(topics instanceof RegExp)) {
      throw new TypeError('"topics" argument must be a string, regex, or an array');
    } else {
      // Single topic name or regex: normalize to a one-element array.
      topics = [topics];
    }
  }

  // Shadow the caller's object via the prototype chain so the mutations
  // below do not leak back into the caller's options.
  options = Object.create(options);

  var fetchSize = options.fetchSize || 1;

  // Run in object mode by default.
  if (options.objectMode === null || options.objectMode === undefined) {
    options.objectMode = true;

    // If they did not explicitly set high water mark, and we are running
    // in object mode, set it to the fetch size + 2 to ensure there is room
    // for a standard fetch
    if (!options.highWaterMark) {
      options.highWaterMark = fetchSize + 2;
    }
  }

  // Object mode streams whole message objects; otherwise raw value buffers.
  if (options.objectMode !== true) {
    this._read = this._read_buffer;
  } else {
    this._read = this._read_message;
  }

  Readable.call(this, options);

  this.consumer = consumer;
  this.topics = topics;
  this.autoClose = options.autoClose === undefined ? true : !!options.autoClose;
  this.waitInterval = options.waitInterval === undefined ? 1000 : options.waitInterval;
  this.fetchSize = fetchSize;
  this.connectOptions = options.connectOptions || {};
  this.streamAsBatch = options.streamAsBatch || false;

  // Hold the messages in here
  this.messages = [];

  var self = this;

  this.consumer
    .on('unsubscribed', function() {
      // Invalidate the stream when we unsubscribe
      self.push(null);
    });

  // Call connect. Handles potentially being connected already
  this.connect(this.connectOptions);

  this.once('end', function() {
    // Tear the stream down once it ends, unless the caller opted out.
    if (this.autoClose) {
      this.destroy();
    }
  });

}
|
||||||
|
|
||||||
|
/**
 * Internal stream read method. This method reads message objects.
 * @param {number} size - Advisory read size from the Readable machinery;
 * capped at `this.fetchSize` when deciding how many messages to fetch.
 * @private
 */
KafkaConsumerStream.prototype._read_message = function(size) {
  // Drain any previously fetched-but-unpushed message first.
  if (this.messages.length > 0) {
    return this.push(this.messages.shift());
  }

  if (!this.consumer) {
    // This consumer is set to `null` in the close function
    return;
  }

  if (!this.consumer.isConnected()) {
    // Not connected yet: defer this read until the consumer is ready.
    this.consumer.once('ready', function() {
      // This is the way Node.js does it
      // https://github.com/nodejs/node/blob/master/lib/fs.js#L1733
      this._read(size);
    }.bind(this));
    return;
  }

  if (this.destroyed) {
    return;
  }

  var self = this;

  // If the size (number of messages) we are being advised to fetch is
  // greater than or equal to the fetch size, use the fetch size.
  // Only opt to use the size in case it is LESS than the fetch size.
  // Essentially, we want to use the smaller value here
  var fetchSize = size >= this.fetchSize ? this.fetchSize : size;

  this.consumer.consume(fetchSize, onread);

  // Retry function. Will wait up to the wait interval, with some
  // random noise if one is provided. Otherwise, will go immediately.
  function retry() {
    if (!self.waitInterval) {
      setImmediate(function() {
        self._read(size);
      });
    } else {
      // `.unref()` keeps this pending retry from holding the process open.
      setTimeout(function() {
        self._read(size);
      }, self.waitInterval * Math.random()).unref();
    }
  }

  // NOTE(review): this handler is near-identical to the one in _read_buffer;
  // the two differ only in what gets buffered (whole message vs. value).
  function onread(err, messages) {

    // If there was an error we still want to emit it.
    // Essentially, if the user does not register an error
    // handler, it will still cause the stream to blow up.
    //
    // But... if one is provided, consumption will move on
    // as normal
    if (err) {
      self.emit('error', err);
    }

    // If there are no messages it means we reached EOF or a timeout.
    // Do what we used to do

    if (err || messages.length < 1) {
      // If we got an error or if there were no messages, initiate a retry
      retry();
      return;
    } else {
      if (self.streamAsBatch) {
        // Batch mode: the whole fetched array is one stream chunk.
        self.push(messages);
      } else {
        // Buffer the fetch locally; one message is pushed per _read call.
        for (var i = 0; i < messages.length; i++) {
          self.messages.push(messages[i]);
        }

        // Now that we have added them all the inner messages buffer,
        // we can just push the most recent one
        self.push(self.messages.shift());
      }
    }

  }
};
|
||||||
|
|
||||||
|
/**
 * Internal stream read method. This method reads message buffers
 * (only the `value` of each message is streamed, not the full object).
 * @param {number} size - Advisory read size from the Readable machinery;
 * capped at `this.fetchSize` when deciding how many messages to fetch.
 * @private
 */
KafkaConsumerStream.prototype._read_buffer = function(size) {
  // Drain any previously fetched-but-unpushed value first.
  if (this.messages.length > 0) {
    return this.push(this.messages.shift());
  }

  if (!this.consumer) {
    // This consumer is set to `null` in the close function
    return;
  }

  if (!this.consumer.isConnected()) {
    // Not connected yet: defer this read until the consumer is ready.
    this.consumer.once('ready', function() {
      // This is the way Node.js does it
      // https://github.com/nodejs/node/blob/master/lib/fs.js#L1733
      this._read(size);
    }.bind(this));
    return;
  }

  if (this.destroyed) {
    return;
  }

  var self = this;

  // If the size (number of messages) we are being advised to fetch is
  // greater than or equal to the fetch size, use the fetch size.
  // Only opt to use the size in case it is LESS than the fetch size.
  // Essentially, we want to use the smaller value here
  var fetchSize = size >= this.fetchSize ? this.fetchSize : size;

  this.consumer.consume(fetchSize, onread);

  // Retry function. Will wait up to the wait interval, with some
  // random noise if one is provided. Otherwise, will go immediately.
  function retry() {
    if (!self.waitInterval) {
      setImmediate(function() {
        self._read(size);
      });
    } else {
      // `.unref()` keeps this pending retry from holding the process open.
      setTimeout(function() {
        self._read(size);
      }, self.waitInterval * Math.random()).unref();
    }
  }

  // NOTE(review): this handler is near-identical to the one in _read_message;
  // the two differ only in what gets buffered (value vs. whole message).
  function onread(err, messages) {
    // If there was an error we still want to emit it.
    // Essentially, if the user does not register an error
    // handler, it will still cause the stream to blow up.
    //
    // But... if one is provided, consumption will move on
    // as normal
    if (err) {
      self.emit('error', err);
    }

    // If there are no messages it means we reached EOF or a timeout.
    // Do what we used to do

    if (err || messages.length < 1) {
      // If we got an error or if there were no messages, initiate a retry
      retry();
      return;
    } else {
      if (self.streamAsBatch) {
        // Batch mode: the whole fetched array is one stream chunk.
        self.push(messages);
      } else {
        // Buffer only the payload (`value`) of each message.
        for (var i = 0; i < messages.length; i++) {
          self.messages.push(messages[i].value);
        }
        // Now that we have added them all the inner messages buffer,
        // we can just push the most recent one
        self.push(self.messages.shift());
      }
    }

  }
};
|
||||||
|
|
||||||
|
/**
 * Connect the underlying consumer and subscribe to the configured topics.
 *
 * When already connected, the connect callback is invoked on the next tick
 * with the consumer's cached metadata. Subscription failures emit 'error'
 * and destroy the stream.
 *
 * @param {object} options - Options forwarded to the consumer's connect call.
 */
KafkaConsumerStream.prototype.connect = function(options) {
  var self = this;

  function onConnected(err, metadata) {
    if (err) {
      self.emit('error', err);
      self.destroy();
      return;
    }

    try {
      // Resolve the topic list. A function-valued `topics` is invoked with
      // the broker metadata; otherwise the configured value is used as-is.
      // If subscribe throws, the stream is invalid.
      var topicList = typeof self.topics === 'function' ?
        self.topics(metadata) :
        self.topics;
      self.consumer.subscribe(topicList);
    } catch (e) {
      self.emit('error', e);
      self.destroy();
      return;
    }

    // Kick off the flow of data
    self.read();
  }

  if (this.consumer.isConnected()) {
    // Already connected: call back asynchronously with cached metadata.
    setImmediate(function() {
      onConnected(null, self.consumer._metadata);
    });
  } else {
    self.consumer.connect(options, onConnected);
  }
};
|
||||||
|
|
||||||
|
/**
 * Destroy the stream exactly once: flag it as destroyed and close it.
 * Subsequent calls are no-ops.
 */
KafkaConsumerStream.prototype.destroy = function() {
  if (!this.destroyed) {
    this.destroyed = true;
    this.close();
  }
};
|
||||||
|
|
||||||
|
// Close the stream: unsubscribe and disconnect the consumer, then emit
// 'close'. An optional callback is invoked once 'close' fires.
// NOTE(review): this reads the consumer's private _isConnecting /
// _isConnected flags directly — confirm those stay stable in Client.
KafkaConsumerStream.prototype.close = function(cb) {
  var self = this;
  if (cb) {
    this.once('close', cb);
  }

  if (!self.consumer._isConnecting && !self.consumer._isConnected) {
    // If we aren't even connected just exit. We are done.
    close();
    return;
  }

  if (self.consumer._isConnecting) {
    // Mid-connect: wait for 'ready', then re-enter close.
    self.consumer.once('ready', function() {
      // Don't pass the CB because it has already been passed.
      self.close();
    });
    return;
  }

  if (self.consumer._isConnected) {
    self.consumer.unsubscribe();
    self.consumer.disconnect(function() {
      close();
    });
  }

  // Local helper: signals listeners (including the optional cb) that the
  // stream is closed.
  function close() {
    self.emit('close');
  }
};
|
656
lib/kafka-consumer.js
Normal file
656
lib/kafka-consumer.js
Normal file
@ -0,0 +1,656 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
'use strict';
|
||||||
|
|
||||||
|
module.exports = KafkaConsumer;
|
||||||
|
|
||||||
|
var Client = require('./client');
|
||||||
|
var util = require('util');
|
||||||
|
var Kafka = require('../librdkafka');
|
||||||
|
var KafkaConsumerStream = require('./kafka-consumer-stream');
|
||||||
|
var LibrdKafkaError = require('./error');
|
||||||
|
var TopicPartition = require('./topic-partition');
|
||||||
|
var shallowCopy = require('./util').shallowCopy;
|
||||||
|
var DEFAULT_CONSUME_LOOP_TIMEOUT_DELAY = 500;
|
||||||
|
var DEFAULT_CONSUME_TIME_OUT = 1000;
|
||||||
|
util.inherits(KafkaConsumer, Client);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* KafkaConsumer class for reading messages from Kafka
|
||||||
|
*
|
||||||
|
* This is the main entry point for reading data from Kafka. You
|
||||||
|
* configure this like you do any other client, with a global
|
||||||
|
* configuration and default topic configuration.
|
||||||
|
*
|
||||||
|
* Once you instantiate this object, connecting will open a socket.
|
||||||
|
* Data will not be read until you tell the consumer what topics
|
||||||
|
* you want to read from.
|
||||||
|
*
|
||||||
|
* @param {object} conf - Key value pairs to configure the consumer
|
||||||
|
* @param {object} topicConf - Key value pairs to create a default
|
||||||
|
* topic configuration
|
||||||
|
* @extends Client
|
||||||
|
* @constructor
|
||||||
|
*/
|
||||||
|
function KafkaConsumer(conf, topicConf) {
  // Allow calling without `new`.
  if (!(this instanceof KafkaConsumer)) {
    return new KafkaConsumer(conf, topicConf);
  }

  // Copy configs so the wrappers installed below never mutate caller objects.
  conf = shallowCopy(conf);
  topicConf = shallowCopy(topicConf);

  var onRebalance = conf.rebalance_cb;

  var self = this;

  // If rebalance is undefined we don't want any part of this
  if (onRebalance && typeof onRebalance === 'boolean') {
    // rebalance_cb === true: install the default handler, which applies the
    // assignment/revocation itself and emits 'rebalance'.
    conf.rebalance_cb = function(err, assignment) {
      // Create the librdkafka error
      err = LibrdKafkaError.create(err);
      // Emit the event
      self.emit('rebalance', err, assignment);

      // That's it
      try {
        if (err.code === -175 /*ERR__ASSIGN_PARTITIONS*/) {
          self.assign(assignment);
        } else if (err.code === -174 /*ERR__REVOKE_PARTITIONS*/) {
          self.unassign();
        }
      } catch (e) {
        // Ignore exceptions if we are not connected
        if (self.isConnected()) {
          self.emit('rebalance.error', e);
        }
      }
    };
  } else if (onRebalance && typeof onRebalance === 'function') {
    /*
     * Once this is opted in to, that's it. It's going to manually rebalance
     * forever. There is no way to unset config values in librdkafka, just
     * a way to override them.
     */

    // User-supplied handler: emit 'rebalance' and delegate; the user is
    // responsible for calling assign()/unassign() themselves.
    conf.rebalance_cb = function(err, assignment) {
      // Create the librdkafka error
      err = err ? LibrdKafkaError.create(err) : undefined;

      self.emit('rebalance', err, assignment);
      onRebalance.call(self, err, assignment);
    };
  }

  // Same treatment for offset_commit_cb
  var onOffsetCommit = conf.offset_commit_cb;

  if (onOffsetCommit && typeof onOffsetCommit === 'boolean') {
    // offset_commit_cb === true: only emit 'offset.commit'.
    conf.offset_commit_cb = function(err, offsets) {
      if (err) {
        err = LibrdKafkaError.create(err);
      }
      // Emit the event
      self.emit('offset.commit', err, offsets);
    };
  } else if (onOffsetCommit && typeof onOffsetCommit === 'function') {
    // User-supplied handler: emit the event, then delegate.
    conf.offset_commit_cb = function(err, offsets) {
      if (err) {
        err = LibrdKafkaError.create(err);
      }
      // Emit the event
      self.emit('offset.commit', err, offsets);
      onOffsetCommit.call(self, err, offsets);
    };
  }

  /**
   * KafkaConsumer message.
   *
   * This is the representation of a message read from Kafka.
   *
   * @typedef {object} KafkaConsumer~Message
   * @property {buffer} value - the message buffer from Kafka.
   * @property {string} topic - the topic name
   * @property {number} partition - the partition on the topic the
   * message was on
   * @property {number} offset - the offset of the message
   * @property {string} key - the message key
   * @property {number} size - message size, in bytes.
   * @property {number} timestamp - message timestamp
   */

  Client.call(this, conf, Kafka.KafkaConsumer, topicConf);

  this.globalConfig = conf;
  this.topicConfig = topicConf;

  // Defaults used by consume()/_consumeLoop(); overridable via the
  // setDefaultConsume* setters below.
  this._consumeTimeout = DEFAULT_CONSUME_TIME_OUT;
  this._consumeLoopTimeoutDelay = DEFAULT_CONSUME_LOOP_TIMEOUT_DELAY;
}
|
||||||
|
|
||||||
|
/**
 * Override the consume timeout handed down to the native layer.
 *
 * @param {number} timeoutMs - number of milliseconds to wait for a message to be fetched
 */
KafkaConsumer.prototype.setDefaultConsumeTimeout = function(timeoutMs) {
  this._consumeTimeout = timeoutMs;
};
|
||||||
|
|
||||||
|
/**
 * Override how long the consume loop sleeps before retrying after a
 * fetch timeout.
 *
 * @param {number} intervalMs - number of milliseconds to sleep after a message fetch has timed out
 */
KafkaConsumer.prototype.setDefaultConsumeLoopTimeoutDelay = function(intervalMs) {
  this._consumeLoopTimeoutDelay = intervalMs;
};
|
||||||
|
|
||||||
|
/**
 * Create a readable stream backed by a brand-new KafkaConsumer.
 *
 * @see TopicReadable
 * @example
 * var consumerStream = Kafka.KafkaConsumer.createReadStream({
 *   'metadata.broker.list': 'localhost:9092',
 *   'group.id': 'librd-test',
 *   'socket.keepalive.enable': true,
 *   'enable.auto.commit': false
 * }, {}, { topics: [ 'test' ] });
 *
 * @param {object} conf - Key value pairs to configure the consumer
 * @param {object} topicConf - Key value pairs to create a default
 * topic configuration
 * @param {object} streamOptions - Stream options
 * @param {array} streamOptions.topics - Array of topics to subscribe to.
 * @return {KafkaConsumerStream} - Readable stream that receives messages
 * when new ones become available.
 */
KafkaConsumer.createReadStream = function(conf, topicConf, streamOptions) {
  return new KafkaConsumerStream(new KafkaConsumer(conf, topicConf), streamOptions);
};
|
||||||
|
|
||||||
|
/**
 * Get a current list of the committed offsets per topic partition.
 *
 * Returns an array of objects in the form of a topic partition list.
 *
 * @param {TopicPartition[]} toppars - Topic partition list to query committed
 * offsets for. Defaults to the current assignment
 * @param {number} timeout - Number of ms to block before calling back
 * and erroring
 * @param {Function} cb - Callback method to execute when finished or timed
 * out
 * @return {Client} - Returns itself
 */
KafkaConsumer.prototype.committed = function(toppars, timeout, cb) {
  // Backwards compatibility: the previous version of this function took two
  // arguments, (timeout, cb). If no callback landed in the third slot, shift
  // the arguments across and query the current assignment.
  if (!cb) {
    cb = arguments[1];
    timeout = arguments[0];
    toppars = this.assignments();
  } else {
    toppars = toppars || this.assignments();
  }

  // Note: unused `var self = this;` removed — the native callback never
  // referenced it.
  this._client.committed(toppars, timeout, function(err, topicPartitions) {
    if (err) {
      // Wrap native error codes before surfacing them.
      cb(LibrdKafkaError.create(err));
      return;
    }

    cb(null, topicPartitions);
  });
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Seek consumer for topic+partition to offset which is either an absolute or
 * logical offset.
 *
 * Does not return anything, as it is asynchronous. There are special cases
 * with the timeout parameter. The consumer must have previously been assigned
 * to the topic and partition it seeks on.
 *
 * @example
 * consumer.seek({ topic: 'topic', partition: 0, offset: 1000 }, 0, function(err) {
 *   if (err) {
 *
 *   }
 * });
 *
 * @param {TopicPartition} toppar - Topic partition to seek.
 * @param {number} timeout - Number of ms to block before calling back
 * and erroring. If the parameter is null or 0, the call will not wait
 * for the seek to be performed. Essentially, it will happen in the background
 * with no notification
 * @param {Function} cb - Callback method to execute when finished or timed
 * out. If the seek timed out, the internal state of the consumer is unknown.
 * @return {Client} - Returns itself
 */
KafkaConsumer.prototype.seek = function(toppar, timeout, cb) {
  // Note: unused `var self = this;` removed — the native callback never
  // referenced it.
  this._client.seek(TopicPartition.create(toppar), timeout, function(err) {
    if (err) {
      // Wrap native error codes before surfacing them.
      cb(LibrdKafkaError.create(err));
      return;
    }

    cb();
  });
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Assign the consumer specific partitions and topics.
 *
 * @param {array} assignments - Assignments array. Should contain
 * objects with topic and partition set.
 * @return {Client} - Returns itself
 */
KafkaConsumer.prototype.assign = function(assignments) {
  // Normalize plain objects into TopicPartition instances first.
  var toppars = TopicPartition.map(assignments);
  this._client.assign(toppars);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Unassign the consumer from its assigned partitions and topics.
 *
 * @return {Client} - Returns itself
 */
KafkaConsumer.prototype.unassign = function() {
  this._client.unassign();
  return this;
};
|
||||||
|
|
||||||
|
|
||||||
|
/**
 * Get the current assignments for the consumer.
 *
 * @throws - Throws when an error code came back from native land
 * @return {array} assignments - Array of topic partitions
 */
KafkaConsumer.prototype.assignments = function() {
  var result = this._client.assignments();
  return this._errorWrap(result, true);
};
|
||||||
|
|
||||||
|
/**
 * Subscribe to an array of topics (synchronously).
 *
 * This operation is fast because it only records an assignment inside
 * librdkafka. It is the recommended way to handle subscriptions during
 * configure-time initialization, across multiple files, or for streams.
 *
 * @param {array} topics - An array of topics to listen to
 * @throws - Throws when an error code came back from native land
 * @return {KafkaConsumer} - Returns itself.
 */
KafkaConsumer.prototype.subscribe = function(topics) {
  // _errorWrap throws if the native call reported a bad error code.
  this._errorWrap(this._client.subscribe(topics));
  this.emit('subscribed', topics);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Get the current subscription of the KafkaConsumer.
 *
 * Returns the list of subscribed topics, which should generally match
 * what was passed via subscribe.
 *
 * @see KafkaConsumer::subscribe
 * @throws - Throws when an error code came back from native land
 * @return {array} - Array of strings to show the current assignment
 */
KafkaConsumer.prototype.subscription = function() {
  var result = this._client.subscription();
  return this._errorWrap(result, true);
};
|
||||||
|
|
||||||
|
/**
 * Get the current offset position of the KafkaConsumer.
 *
 * Returns a list of RdKafka::TopicPartitions on success, or throws
 * an error on failure.
 *
 * @param {TopicPartition[]} toppars - List of topic partitions to query
 * position for. Defaults to the current assignment
 * @throws - Throws when an error code came back from native land
 * @return {array} - TopicPartition array. Each item is an object with
 * an offset, topic, and partition
 */
KafkaConsumer.prototype.position = function(toppars) {
  // Fall back to the current assignment when no list is provided.
  var queryList = toppars || this.assignments();
  return this._errorWrap(this._client.position(queryList), true);
};
|
||||||
|
|
||||||
|
/**
 * Unsubscribe from all currently subscribed topics.
 *
 * An existing subscription must be removed before subscribing to new
 * topics; otherwise the subscribe call errors out.
 *
 * @throws - Throws when an error code comes back from native land
 * @return {KafkaConsumer} - Returns itself.
 */
KafkaConsumer.prototype.unsubscribe = function() {
  this._errorWrap(this._client.unsubscribe());
  this.emit('unsubscribed', []);
  // Backwards compatible change
  this.emit('unsubscribe', []);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Read a number of messages from Kafka.
 *
 * This method is similar to the main one, except that it reads a number
 * of messages before calling back. This may get better performance than
 * reading a single message each time in stream implementations.
 *
 * This will keep going until it gets ERR__PARTITION_EOF or ERR__TIMED_OUT
 * so the array may not be the same size you ask for. The size is advisory,
 * but we will not exceed it.
 *
 * @param {number} size - Number of messages to read
 * @param {KafkaConsumer~readCallback} cb - Callback to return when work is done.
 *//**
 * Read messages from Kafka as fast as possible
 *
 * This method keeps a background thread running to fetch the messages
 * as quickly as it can, sleeping only in between EOF and broker timeouts.
 *
 * Use this to get the maximum read performance if you don't care about the
 * stream backpressure.
 * @param {KafkaConsumer~readCallback} cb - Callback to return when a message
 * is fetched.
 */
KafkaConsumer.prototype.consume = function(number, cb) {
  var timeoutMs = this._consumeTimeout || DEFAULT_CONSUME_TIME_OUT;
  // Note: unused `var self = this;` removed — nothing below referenced it.

  if ((number && typeof number === 'number') || (number && cb)) {
    // Batch mode: a message count was provided.

    if (cb === undefined) {
      cb = function() {};
    } else if (typeof cb !== 'function') {
      throw new TypeError('Callback must be a function');
    }

    this._consumeNum(timeoutMs, number, cb);
  } else {
    // Flowing mode: no count, consume continuously.

    // See https://github.com/Blizzard/node-rdkafka/issues/220
    // Docs specify just a callback can be provided but really we needed
    // a fallback to the number argument
    // @deprecated
    if (cb === undefined) {
      if (typeof number === 'function') {
        cb = number;
      } else {
        cb = function() {};
      }
    }

    this._consumeLoop(timeoutMs, cb);
  }
};
|
||||||
|
|
||||||
|
/**
 * Open a background thread and keep getting messages as fast
 * as we can. Should not be called directly, and instead should
 * be called using consume.
 *
 * @private
 * @see consume
 */
KafkaConsumer.prototype._consumeLoop = function(timeoutMs, cb) {
  var self = this;
  var retryReadInterval = this._consumeLoopTimeoutDelay;
  // The native loop invokes this callback once per event; exactly one of
  // err / eofEvent / warning / message is meaningful per invocation.
  self._client.consumeLoop(timeoutMs, retryReadInterval, function readCallback(err, message, eofEvent, warning) {

    if (err) {
      // A few different types of errors here
      // but the two we do NOT care about are
      // time outs at least now
      // Broker no more messages will also not come here
      cb(LibrdKafkaError.create(err));
    } else if (eofEvent) {
      // Partition EOF: surfaced as an event, not via the callback.
      self.emit('partition.eof', eofEvent);
    } else if (warning) {
      self.emit('warning', LibrdKafkaError.create(warning));
    } else {
      /**
       * Data event. called whenever a message is received.
       *
       * @event KafkaConsumer#data
       * @type {KafkaConsumer~Message}
       */
      self.emit('data', message);
      // err is necessarily falsy on this branch.
      cb(err, message);
    }
  });

};
|
||||||
|
|
||||||
|
/**
 * Consume a number of messages and wrap in a try catch with
 * proper error reporting. Should not be called directly,
 * and instead should be called using consume.
 *
 * Emits 'partition.eof' events interleaved in message order: eofEvents
 * carries a messageIndex marking the message it occurred after (-1 means
 * before the first message).
 *
 * @private
 * @see consume
 */
KafkaConsumer.prototype._consumeNum = function(timeoutMs, numMessages, cb) {
  var self = this;

  this._client.consume(timeoutMs, numMessages, function(err, messages, eofEvents) {
    if (err) {
      err = LibrdKafkaError.create(err);
      if (cb) {
        cb(err);
      }
      return;
    }

    var currentEofEventsIndex = 0;

    // Emit every queued EOF event that belongs at this message position.
    function emitEofEventsFor(messageIndex) {
      while (currentEofEventsIndex < eofEvents.length && eofEvents[currentEofEventsIndex].messageIndex === messageIndex) {
        // Strip the internal bookkeeping field before exposing the event.
        delete eofEvents[currentEofEventsIndex].messageIndex;
        // Fixed: terminated with a semicolon instead of relying on ASI.
        self.emit('partition.eof', eofEvents[currentEofEventsIndex]);
        ++currentEofEventsIndex;
      }
    }

    // EOF events that happened before any message was read.
    emitEofEventsFor(-1);

    for (var i = 0; i < messages.length; i++) {
      self.emit('data', messages[i]);
      emitEofEventsFor(i);
    }

    // EOF events that happened after the last message.
    emitEofEventsFor(messages.length);

    if (cb) {
      cb(null, messages);
    }

  });

};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This callback returns the message read from Kafka.
|
||||||
|
*
|
||||||
|
* @callback KafkaConsumer~readCallback
|
||||||
|
* @param {LibrdKafkaError} err - An error, if one occurred while reading
|
||||||
|
* the data.
|
||||||
|
* @param {KafkaConsumer~Message} message
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * Commit a topic partition or all topic partitions that have been read.
 *
 * If you provide a topic partition, it will commit that. Otherwise,
 * it will commit all read offsets for all topic partitions.
 *
 * @param {object|array|null} - Topic partition object to commit, list of topic
 * partitions, or null if you want to commit all read offsets.
 * @throws When commit returns a non 0 error code
 *
 * @return {KafkaConsumer} - returns itself.
 */
KafkaConsumer.prototype.commit = function(topicPartition) {
  var result = this._client.commit(topicPartition);
  this._errorWrap(result, true);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Commit a message.
 *
 * Convenience wrapper around commit: committed offsets point at the next
 * message to read, so one is added to the message's offset.
 *
 * @param {object} - Message object to commit
 * @throws When commit returns a non 0 error code
 *
 * @return {KafkaConsumer} - returns itself.
 */
KafkaConsumer.prototype.commitMessage = function(msg) {
  this._errorWrap(this._client.commit({
    topic: msg.topic,
    partition: msg.partition,
    offset: msg.offset + 1
  }), true);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Commit a topic partition (or all topic partitions) synchronously.
 *
 * @param {object|array|null} - Topic partition object to commit, list of topic
 * partitions, or null if you want to commit all read offsets.
 * @throws {LibrdKafkaError} - if the commit fails
 *
 * @return {KafkaConsumer} - returns itself.
 */
KafkaConsumer.prototype.commitSync = function(topicPartition) {
  var result = this._client.commitSync(topicPartition);
  this._errorWrap(result, true);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Commit a message synchronously.
 *
 * Committed offsets point at the next message to read, so one is added to
 * the message's offset.
 *
 * @see KafkaConsumer#commitMessageSync
 * @param {object} msg - A message object to commit.
 *
 * @throws {LibrdKafkaError} - if the commit fails
 *
 * @return {KafkaConsumer} - returns itself.
 */
KafkaConsumer.prototype.commitMessageSync = function(msg) {
  this._errorWrap(this._client.commitSync({
    topic: msg.topic,
    partition: msg.partition,
    offset: msg.offset + 1
  }), true);
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Get last known offsets from the client.
 *
 * The low offset is updated periodically (if statistics.interval.ms is set)
 * while the high offset is updated on each fetched message set from the
 * broker.
 *
 * If there is no cached offset (either low or high, or both), then this will
 * throw an error.
 *
 * @param {string} topic - Topic to recieve offsets from.
 * @param {number} partition - Partition of the provided topic to recieve offsets from
 * @return {Client~watermarkOffsets} - Returns an object with a high and low property, specifying
 * the high and low offsets for the topic partition
 * @throws {LibrdKafkaError} - Throws when there is no offset stored
 */
KafkaConsumer.prototype.getWatermarkOffsets = function(topic, partition) {
  if (!this.isConnected()) {
    throw new Error('Client is disconnected');
  }

  var offsets = this._client.getWatermarkOffsets(topic, partition);
  return this._errorWrap(offsets, true);
};
|
||||||
|
|
||||||
|
/**
 * Store offsets for the given topic partitions.
 *
 * Stored offsets are written to the offset store on the auto commit
 * interval (when auto commit is on) or on the next manual commit.
 *
 * enable.auto.offset.store must be set to false to use this API.
 *
 * @see https://github.com/edenhill/librdkafka/blob/261371dc0edef4cea9e58a076c8e8aa7dc50d452/src-cpp/rdkafkacpp.h#L1702
 *
 * @param {Array.<TopicPartition>} topicPartitions - Topic partitions with offsets to store offsets for.
 * @throws {LibrdKafkaError} - Throws when there is no offset stored
 */
KafkaConsumer.prototype.offsetsStore = function(topicPartitions) {
  if (!this.isConnected()) {
    throw new Error('Client is disconnected');
  }

  var result = this._client.offsetsStore(topicPartitions);
  return this._errorWrap(result, true);
};
|
||||||
|
|
||||||
|
/**
 * Resume consumption for the provided list of partitions.
 *
 * @param {Array.<TopicPartition>} topicPartitions - List of topic partitions to resume consumption on.
 * @throws {LibrdKafkaError} - Throws when there is no offset stored
 */
KafkaConsumer.prototype.resume = function(topicPartitions) {
  if (!this.isConnected()) {
    throw new Error('Client is disconnected');
  }

  var result = this._client.resume(topicPartitions);
  return this._errorWrap(result, true);
};
|
||||||
|
|
||||||
|
/**
 * Pause producing or consumption for the provided list of partitions.
 *
 * @param {Array.<TopicPartition>} topicPartitions - List of topics to pause consumption on.
 * @throws {LibrdKafkaError} - Throws when there is no offset stored
 */
KafkaConsumer.prototype.pause = function(topicPartitions) {
  if (!this.isConnected()) {
    throw new Error('Client is disconnected');
  }

  var result = this._client.pause(topicPartitions);
  return this._errorWrap(result, true);
};
|
307
lib/producer-stream.js
Normal file
307
lib/producer-stream.js
Normal file
@ -0,0 +1,307 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
'use strict';
|
||||||
|
|
||||||
|
module.exports = ProducerStream;
|
||||||
|
|
||||||
|
var Writable = require('stream').Writable;
|
||||||
|
var util = require('util');
|
||||||
|
var ErrorCode = require('./error').codes;
|
||||||
|
|
||||||
|
util.inherits(ProducerStream, Writable);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writable stream integrating with the Kafka Producer.
|
||||||
|
*
|
||||||
|
* This class is used to write data to Kafka in a streaming way. It takes
|
||||||
|
* buffers of data and puts them into the appropriate Kafka topic. If you need
|
||||||
|
* finer control over partitions or keys, this is probably not the class for
|
||||||
|
* you. In that situation just use the Producer itself.
|
||||||
|
*
|
||||||
|
* The stream detects if Kafka is already connected. You can safely begin
|
||||||
|
* writing right away.
|
||||||
|
*
|
||||||
|
* This stream does not operate in Object mode and can only be given buffers.
|
||||||
|
*
|
||||||
|
* @param {Producer} producer - The Kafka Producer object.
|
||||||
|
* @param {array} topics - Array of topics
|
||||||
|
* @param {object} options - Topic configuration.
|
||||||
|
* @constructor
|
||||||
|
* @extends stream.Writable
|
||||||
|
*/
|
||||||
|
function ProducerStream(producer, options) {
  // Support calling without `new`.
  if (!(this instanceof ProducerStream)) {
    return new ProducerStream(producer, options);
  }

  // Normalize the stream options: allow a bare encoding string,
  // default to an empty object, and reject anything else.
  if (options === undefined) {
    options = {};
  } else if (typeof options === 'string') {
    options = { encoding: options };
  } else if (options === null || typeof options !== 'object') {
    throw new TypeError('"streamOptions" argument must be a string or an object');
  }

  // Buffer-mode streams write to a single fixed topic, so one is required.
  if (!options.objectMode && !options.topic) {
    throw new TypeError('ProducerStreams not using objectMode must provide a topic to produce to.');
  }

  // Select the _write implementation up front based on the stream mode:
  // raw buffers to a fixed topic, or full message objects.
  if (options.objectMode !== true) {
    this._write = this._write_buffer;
  } else {
    this._write = this._write_message;
  }

  Writable.call(this, options);

  this.producer = producer;
  this.topicName = options.topic;

  // autoClose defaults to true; coerce any provided value to a boolean.
  this.autoClose = options.autoClose === undefined ? true : !!options.autoClose;
  this.connectOptions = options.connectOptions || {};

  // Poll regularly so delivery reports and errors surface promptly.
  this.producer.setPollInterval(options.pollInterval || 1000);

  if (options.encoding) {
    this.setDefaultEncoding(options.encoding);
  }

  // Connect to the producer. Unless we are already connected
  if (!this.producer.isConnected()) {
    this.connect(this.connectOptions);
  }

  // When the writable side finishes, optionally tear the producer down.
  // (EventEmitter invokes the listener with `this` bound to the stream.)
  this.once('finish', function() {
    if (this.autoClose) {
      this.close();
    }
  });
}
|
||||||
|
|
||||||
|
/**
 * Connect the underlying producer.
 *
 * Emits an 'error' event on the stream if the connection attempt fails;
 * on success nothing further is done here (writes are gated on 'ready').
 *
 * @param {object} options - Options passed through to Producer#connect.
 */
ProducerStream.prototype.connect = function(options) {
  var self = this;
  this.producer.connect(options, function(err, data) {
    if (!err) {
      return;
    }
    self.emit('error', err);
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Internal stream write method for ProducerStream when writing buffers.
|
||||||
|
*
|
||||||
|
* This method should never be called externally. It has some recursion to
|
||||||
|
* handle cases where the producer is not yet connected.
|
||||||
|
*
|
||||||
|
* @param {buffer} chunk - Chunk to write.
|
||||||
|
* @param {string} encoding - Encoding for the buffer
|
||||||
|
* @param {Function} cb - Callback to call when the stream is done processing
|
||||||
|
* the data.
|
||||||
|
* @private
|
||||||
|
* @see https://github.com/nodejs/node/blob/master/lib/fs.js#L1901
|
||||||
|
*/
|
||||||
|
ProducerStream.prototype._write_buffer = function(data, encoding, cb) {
  // Non-object-mode streams accept raw buffers only.
  if (!(data instanceof Buffer)) {
    this.emit('error', new Error('Invalid data. Can only produce buffers'));
    return;
  }

  var stream = this;

  // Defer the write until the producer connection is established.
  if (!this.producer.isConnected()) {
    this.producer.once('ready', function() {
      stream._write(data, encoding, cb);
    });
    return;
  }

  // Local queue is full: poll to drain delivery reports, then re-attempt
  // after a short delay so backpressure is exerted on the writer.
  function retryLater() {
    stream.producer.poll();
    setTimeout(function() {
      stream._write(data, encoding, cb);
    }, 500);
  }

  // Fatal produce error: optionally close the stream, then report it.
  function failWrite(e) {
    if (stream.autoClose) {
      stream.close();
    }
    setImmediate(function() {
      cb(e);
    });
  }

  try {
    this.producer.produce(stream.topicName, null, data, null);
    setImmediate(cb);
  } catch (e) {
    if (e.code === ErrorCode.ERR__QUEUE_FULL) {
      retryLater();
    } else {
      failWrite(e);
    }
  }
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Internal stream write method for ProducerStream when writing objects.
|
||||||
|
*
|
||||||
|
* This method should never be called externally. It has some recursion to
|
||||||
|
* handle cases where the producer is not yet connected.
|
||||||
|
*
|
||||||
|
* @param {object} message - Message to write.
|
||||||
|
* @param {string} encoding - Encoding for the buffer
|
||||||
|
* @param {Function} cb - Callback to call when the stream is done processing
|
||||||
|
* the data.
|
||||||
|
* @private
|
||||||
|
* @see https://github.com/nodejs/node/blob/master/lib/fs.js#L1901
|
||||||
|
*/
|
||||||
|
ProducerStream.prototype._write_message = function(message, encoding, cb) {
  var stream = this;

  // Defer the write until the producer connection is established.
  if (!this.producer.isConnected()) {
    this.producer.once('ready', function() {
      stream._write(message, encoding, cb);
    });
    return;
  }

  // Local queue is full: poll to drain delivery reports, then re-attempt
  // after a short delay so backpressure is exerted on the writer.
  function retryLater() {
    stream.producer.poll();
    setTimeout(function() {
      stream._write(message, encoding, cb);
    }, 500);
  }

  // Fatal produce error: optionally close the stream, then report it.
  function failWrite(e) {
    if (stream.autoClose) {
      stream.close();
    }
    setImmediate(function() {
      cb(e);
    });
  }

  try {
    this.producer.produce(message.topic, message.partition, message.value, message.key, message.timestamp, message.opaque, message.headers);
    setImmediate(cb);
  } catch (e) {
    if (e.code === ErrorCode.ERR__QUEUE_FULL) {
      retryLater();
    } else {
      failWrite(e);
    }
  }
};
|
||||||
|
|
||||||
|
/**
 * Produce a batch of chunks to Kafka, invoking `cb` once every chunk has
 * been handed to the producer, or immediately on the first fatal error.
 *
 * Buffers go to `topic`; message objects carry their own topic/partition/etc.
 * On a full local queue the remaining chunks are retried recursively with
 * the same final callback.
 *
 * @param {Producer} producer - Connected producer to write through.
 * @param {string} topic - Topic used for Buffer chunks.
 * @param {Array} chunks - Buffers or message objects to produce.
 * @param {Function} cb - Called with an error or nothing when done.
 * @private
 */
function writev(producer, topic, chunks, cb) {

  // @todo maybe a produce batch method?
  var doneCount = 0;
  var err = null;
  var chunk = null;

  // Invoked once per successfully-queued chunk; fires the final callback
  // only after the last one. (`e` is recorded but never set by callers here.)
  function maybeDone(e) {
    if (e) {
      err = e;
    }
    doneCount ++;
    if (doneCount === chunks.length) {
      cb(err);
    }
  }

  // Re-attempt the not-yet-produced tail of the batch after the local
  // queue has had a chance to drain.
  function retry(restChunks) {
    // Poll for good measure
    producer.poll();

    // Just delay this thing a bit and pass the params
    // backpressure will get exerted this way.
    setTimeout(function() {
      writev(producer, topic, restChunks, cb);
    }, 500);
  }

  for (var i = 0; i < chunks.length; i++) {
    chunk = chunks[i];

    try {
      // Buffers use the stream's fixed topic; message objects are
      // self-describing.
      if (Buffer.isBuffer(chunk)) {
        producer.produce(topic, null, chunk, null);
      } else {
        producer.produce(chunk.topic, chunk.partition, chunk.value, chunk.key, chunk.timestamp, chunk.opaque, chunk.headers);
      }
      maybeDone();
    } catch (e) {
      // Queue full: retry from this chunk onward; any other error is fatal
      // for the whole batch. Either way, stop the loop.
      if (ErrorCode.ERR__QUEUE_FULL === e.code) {
        retry(chunks.slice(i));
      } else {
        cb(e);
      }
      break;
    }
  }

}
|
||||||
|
|
||||||
|
/**
 * Internal stream writev method for ProducerStream.
 *
 * Writes every buffered chunk to Kafka in one pass via the writev helper.
 * If any chunk fails fatally, the stream is closed and the error is passed
 * to the callback.
 *
 * @param {Array.<object>} data - Array of { chunk, encoding } entries
 * supplied by the Writable machinery.
 * @param {Function} cb - Callback to call when all chunks are processed.
 * @private
 */
ProducerStream.prototype._writev = function(data, cb) {
  var self = this;

  // Not connected yet: the 'ready' event is emitted by the producer, not
  // by this stream, so listen there and retry once connected. (Previously
  // this listened on the stream itself, which never emits 'ready', so the
  // write would hang forever.)
  if (!this.producer.isConnected()) {
    this.producer.once('ready', function() {
      self._writev(data, cb);
    });
    return;
  }

  // Unwrap the raw chunks out of the { chunk, encoding } entries.
  var chunks = data.map(function(entry) {
    return entry.chunk;
  });

  writev(this.producer, this.topicName, chunks, function(err) {
    if (err) {
      self.close();
      cb(err);
      return;
    }
    cb();
  });
};
|
||||||
|
|
||||||
|
/**
 * Close the stream and, if connected, disconnect the underlying producer.
 *
 * Emits 'close' when done. If a callback is provided it is attached as a
 * one-time 'close' listener.
 *
 * @param {Function} [cb] - Optional callback fired on 'close'.
 */
ProducerStream.prototype.close = function(cb) {
  var self = this;
  if (cb) {
    this.once('close', cb);
  }

  // Use interval variables in here
  // NOTE(review): this reads the producer's private _isConnected /
  // _isConnecting flags rather than the public isConnected() — presumably
  // to distinguish the "still connecting" case; verify against Client.
  if (self.producer._isConnected) {
    self.producer.disconnect(function() {
      // Previously this set the producer to null. I'm not sure there is any benefit
      // to that other than I guess helping flag it for GC?
      // https://github.com/Blizzard/node-rdkafka/issues/344
      close();
    });
  } else if (self.producer._isConnecting){
    // Mid-connect: wait for 'ready', then run close() again.
    self.producer.once('ready', function() {
      // Don't pass CB this time because it has already been passed
      self.close();
    });
  } else {
    // Never connected: just emit 'close' asynchronously.
    setImmediate(close);
  }

  // Hoisted helper shared by all three branches above.
  function close() {
    self.emit('close');
  }
};
|
375
lib/producer.js
Normal file
375
lib/producer.js
Normal file
@ -0,0 +1,375 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
module.exports = Producer;
|
||||||
|
|
||||||
|
var Client = require('./client');
|
||||||
|
|
||||||
|
var util = require('util');
|
||||||
|
var Kafka = require('../librdkafka.js');
|
||||||
|
var ProducerStream = require('./producer-stream');
|
||||||
|
var LibrdKafkaError = require('./error');
|
||||||
|
var shallowCopy = require('./util').shallowCopy;
|
||||||
|
|
||||||
|
util.inherits(Producer, Client);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Producer class for sending messages to Kafka
|
||||||
|
*
|
||||||
|
* This is the main entry point for writing data to Kafka. You
|
||||||
|
* configure this like you do any other client, with a global
|
||||||
|
* configuration and default topic configuration.
|
||||||
|
*
|
||||||
|
* Once you instantiate this object, you need to connect to it first.
|
||||||
|
* This allows you to get the metadata and make sure the connection
|
||||||
|
* can be made before you depend on it. After that, problems with
|
||||||
|
* the connection will by brought down by using poll, which automatically
|
||||||
|
* runs when a transaction is made on the object.
|
||||||
|
*
|
||||||
|
* @param {object} conf - Key value pairs to configure the producer
|
||||||
|
* @param {object} topicConf - Key value pairs to create a default
|
||||||
|
* topic configuration
|
||||||
|
* @extends Client
|
||||||
|
* @constructor
|
||||||
|
*/
|
||||||
|
function Producer(conf, topicConf) {
  // Support calling without `new`.
  if (!(this instanceof Producer)) {
    return new Producer(conf, topicConf);
  }

  // Copy the configs so the key deletions below never mutate the
  // caller's objects.
  conf = shallowCopy(conf);
  topicConf = shallowCopy(topicConf);

  /**
   * Producer message. This is sent to the wrapper, not received from it
   *
   * @typedef {object} Producer~Message
   * @property {string|buffer} message - The buffer to send to Kafka.
   * @property {Topic} topic - The Kafka topic to produce to.
   * @property {number} partition - The partition to produce to. Defaults to
   * the partitioner
   * @property {string} key - The key string to use for the message.
   * @see Consumer~Message
   */

  // Pull the node-level options out of the config before it is handed to
  // the native layer.
  var gTopic = conf.topic || false;
  var gPart = conf.partition || null;
  var dr_cb = conf.dr_cb || null;
  var dr_msg_cb = conf.dr_msg_cb || null;

  // delete keys we don't want to pass on
  delete conf.topic;
  delete conf.partition;

  delete conf.dr_cb;
  delete conf.dr_msg_cb;

  // client is an initialized producer object
  // @see NodeKafka::Producer::Init
  Client.call(this, conf, Kafka.Producer, topicConf);

  // Delete these keys after saving them in vars
  this.globalConfig = conf;
  this.topicConfig = topicConf;
  this.defaultTopic = gTopic || null;
  // `== null` matches both null and undefined; -1 lets librdkafka's
  // partitioner choose the partition.
  this.defaultPartition = gPart == null ? -1 : gPart;

  // Count of messages handed to produce(); incremented in Producer#produce.
  this.sentMessages = 0;

  // Timer handle managed by setPollInterval().
  this.pollInterval = undefined;

  if (dr_msg_cb || dr_cb) {
    // Forward native delivery reports as 'delivery-report' events,
    // wrapping any raw error code first.
    this._cb_configs.event.delivery_cb = function(err, report) {
      if (err) {
        err = LibrdKafkaError.create(err);
      }
      this.emit('delivery-report', err, report);
    }.bind(this);
    // Tell the native layer whether delivery reports should include the
    // full message payload.
    this._cb_configs.event.delivery_cb.dr_msg_cb = !!dr_msg_cb;

    if (typeof dr_cb === 'function') {
      this.on('delivery-report', dr_cb);
    }

  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Produce a message to Kafka synchronously.
|
||||||
|
*
|
||||||
|
* This is the method mainly used in this class. Use it to produce
|
||||||
|
* a message to Kafka.
|
||||||
|
*
|
||||||
|
* When this is sent off, there is no guarantee it is delivered. If you need
|
||||||
|
* guaranteed delivery, change your *acks* settings, or use delivery reports.
|
||||||
|
*
|
||||||
|
* @param {string} topic - The topic name to produce to.
|
||||||
|
* @param {number|null} partition - The partition number to produce to.
|
||||||
|
* @param {Buffer|null} message - The message to produce.
|
||||||
|
* @param {string} key - The key associated with the message.
|
||||||
|
* @param {number|null} timestamp - Timestamp to send with the message.
|
||||||
|
* @param {object} opaque - An object you want passed along with this message, if provided.
|
||||||
|
* @param {object} headers - A list of custom key value pairs that provide message metadata.
|
||||||
|
* @throws {LibrdKafkaError} - Throws a librdkafka error if it failed.
|
||||||
|
* @return {boolean} - returns an error if it failed, or true if not
|
||||||
|
* @see Producer#produce
|
||||||
|
*/
|
||||||
|
Producer.prototype.produce = function(topic, partition, message, key, timestamp, opaque, headers) {
  if (!this._isConnected) {
    throw new Error('Producer not connected');
  }

  // I have removed support for using a topic object. It is going to be removed
  // from librdkafka soon, and it causes issues with shutting down
  if (typeof topic !== 'string' || !topic) {
    throw new TypeError('"topic" must be a string');
  }

  this.sentMessages++;

  // A null/undefined partition falls back to the configured default.
  var targetPartition = partition == null ? this.defaultPartition : partition;

  return this._errorWrap(
    this._client.produce(topic, targetPartition, message, key, timestamp, opaque, headers));
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a write stream interface for a producer.
|
||||||
|
*
|
||||||
|
* This stream does not run in object mode. It only takes buffers of data.
|
||||||
|
*
|
||||||
|
* @param {object} conf - Key value pairs to configure the producer
|
||||||
|
* @param {object} topicConf - Key value pairs to create a default
|
||||||
|
* topic configuration
|
||||||
|
* @param {object} streamOptions - Stream options
|
||||||
|
* @return {ProducerStream} - returns the write stream for writing to Kafka.
|
||||||
|
*/
|
||||||
|
Producer.createWriteStream = function(conf, topicConf, streamOptions) {
  // Build a dedicated producer and wrap it in a writable stream.
  return new ProducerStream(new Producer(conf, topicConf), streamOptions);
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Poll for events
|
||||||
|
*
|
||||||
|
* We need to run poll in order to learn about new events that have occurred.
|
||||||
|
* This is no longer done automatically when we produce, so we need to run
|
||||||
|
* it manually, or set the producer to automatically poll.
|
||||||
|
*
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.poll = function() {
  // Polling requires a live connection.
  if (!this._isConnected) {
    throw new Error('Producer not connected');
  }

  this._client.poll();

  // Chainable.
  return this;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set automatic polling for events.
|
||||||
|
*
|
||||||
|
* We need to run poll in order to learn about new events that have occurred.
|
||||||
|
* If you would like this done on an interval with disconnects and reconnections
|
||||||
|
* managed, you can do it here
|
||||||
|
*
|
||||||
|
* @param {number} interval - Interval, in milliseconds, to poll
|
||||||
|
*
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.setPollInterval = function(interval) {
  // If we already have a poll interval we need to stop it
  if (this.pollInterval) {
    clearInterval(this.pollInterval);
    this.pollInterval = undefined;
  }

  if (interval === 0) {
    // If the interval was set to 0, bail out. We don't want to process this.
    // If there was an interval previously set, it has been removed.
    // NOTE(review): this early return yields undefined rather than `this`,
    // unlike the success path below — confirm callers don't chain here.
    return;
  }

  var self = this;

  // Now we want to make sure we are connected.
  if (!this._isConnected) {
    // If we are not, execute this once the connection goes through.
    this.once('ready', function() {
      self.setPollInterval(interval);
    });
    return;
  }

  // We know we are connected at this point.
  // Unref this interval so it never keeps the process alive on its own.
  // Poll errors are swallowed deliberately; disconnects are handled below.
  this.pollInterval = setInterval(function() {
    try {
      self.poll();
    } catch (e) {
      // We can probably ignore errors here as far as broadcasting.
      // Disconnection issues will get handled below
    }
  }, interval).unref();

  // Handle disconnections
  // NOTE(review): a fresh once('disconnected') listener is registered on
  // every successful call, so repeated calls accumulate listeners —
  // confirm whether this is intentional.
  this.once('disconnected', function() {
    // Just rerun this function with interval 0. If any
    // poll interval is set, this will remove it
    self.setPollInterval(0);
  });

  return this;
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flush the producer
|
||||||
|
*
|
||||||
|
* Flush everything on the internal librdkafka producer buffer. Do this before
|
||||||
|
* disconnects usually
|
||||||
|
*
|
||||||
|
* @param {number} timeout - Number of milliseconds to try to flush before giving up.
|
||||||
|
* @param {function} callback - Callback to fire when the flush is done.
|
||||||
|
*
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.flush = function(timeout, callback) {
  if (!this._isConnected) {
    throw new Error('Producer not connected');
  }

  // Only null/undefined fall back to the 500ms default flush window.
  var flushTimeout = timeout == null ? 500 : timeout;

  this._client.flush(flushTimeout, function(err) {
    // Wrap raw error codes before handing them to the caller.
    var wrapped = err ? LibrdKafkaError.create(err) : err;

    if (callback) {
      callback(wrapped);
    }
  });

  // Chainable.
  return this;
};
|
||||||
|
|
||||||
|
/**
 * Save the base (Client) disconnect method here so we can overwrite it and add a flush
 * before disconnecting in Producer#disconnect below.
 * @private
 */
Producer.prototype._disconnect = Producer.prototype.disconnect;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Disconnect the producer
|
||||||
|
*
|
||||||
|
* Flush everything on the internal librdkafka producer buffer. Then disconnect
|
||||||
|
*
|
||||||
|
* @param {number} timeout - Number of milliseconds to try to flush before giving up, defaults to 5 seconds.
|
||||||
|
* @param {function} cb - The callback to fire when
|
||||||
|
*/
|
||||||
|
Producer.prototype.disconnect = function(timeout, cb) {
  var producer = this;
  var flushTimeout;

  // Support disconnect(cb) with a default 5 second flush window.
  if (typeof timeout === 'function') {
    cb = timeout;
    flushTimeout = 5000;
  } else {
    flushTimeout = timeout;
  }

  // Flush outstanding messages first, then perform the base disconnect.
  this.flush(flushTimeout, function() {
    producer._disconnect(cb);
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Init a transaction.
|
||||||
|
*
|
||||||
|
* Initialize transactions, this is only performed once per transactional producer.
|
||||||
|
*
|
||||||
|
* @param {number} timeout - Number of milliseconds to try to initialize before giving up, defaults to 5 seconds.
|
||||||
|
* @param {function} cb - Callback to return when operation is completed
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.initTransactions = function(timeout, cb) {
  // Support initTransactions(cb) with the default 5 second timeout.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }
  this._client.initTransactions(timeout, function(err) {
    // Wrap raw error codes before handing them to the caller.
    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(err);
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Begin a transaction.
|
||||||
|
*
|
||||||
|
* 'initTransaction' must have been called successfully (once) before this function is called.
|
||||||
|
*
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.beginTransaction = function(cb) {
  this._client.beginTransaction(function(err) {
    // Wrap raw error codes before handing them to the caller.
    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(err);
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Commit the current transaction (as started with 'beginTransaction').
|
||||||
|
*
|
||||||
|
* @param {number} timeout - Number of milliseconds to try to commit before giving up, defaults to 5 seconds
|
||||||
|
* @param {function} cb - Callback to return when operation is completed
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.commitTransaction = function(timeout, cb) {
  // Support commitTransaction(cb) with the default 5 second timeout.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }
  this._client.commitTransaction(timeout, function(err) {
    // Wrap raw error codes before handing them to the caller.
    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(err);
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Aborts the ongoing transaction.
|
||||||
|
*
|
||||||
|
* @param {number} timeout - Number of milliseconds to try to abort, defaults to 5 seconds
|
||||||
|
* @param {function} cb - Callback to return when operation is completed
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.abortTransaction = function(timeout, cb) {
  // Support abortTransaction(cb) with the default 5 second timeout.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }
  this._client.abortTransaction(timeout, function(err) {
    // Wrap raw error codes before handing them to the caller.
    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(err);
    }
  });
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send the current offsets of the consumer to the ongoing transaction.
|
||||||
|
*
|
||||||
|
* @param {number} offsets - Offsets to send as part of the next commit
|
||||||
|
* @param {Consumer} consumer - An instance of the consumer
|
||||||
|
* @param {number} timeout - Number of milliseconds to try to send offsets, defaults to 5 seconds
|
||||||
|
* @param {function} cb - Callback to return when operation is completed
|
||||||
|
* @return {Producer} - returns itself.
|
||||||
|
*/
|
||||||
|
Producer.prototype.sendOffsetsToTransaction = function(offsets, consumer, timeout, cb) {
  // Support sendOffsetsToTransaction(offsets, consumer, cb) with the
  // default 5 second timeout.
  if (typeof timeout === 'function') {
    cb = timeout;
    timeout = 5000;
  }
  this._client.sendOffsetsToTransaction(offsets, consumer.getClient(), timeout, function(err) {
    // Wrap raw error codes before handing them to the caller.
    if (err) {
      cb(LibrdKafkaError.create(err));
    } else {
      cb(err);
    }
  });
};
|
323
lib/producer/high-level-producer.js
Normal file
323
lib/producer/high-level-producer.js
Normal file
@ -0,0 +1,323 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
module.exports = HighLevelProducer;
|
||||||
|
|
||||||
|
var util = require('util');
|
||||||
|
var Producer = require('../producer');
|
||||||
|
var LibrdKafkaError = require('../error');
|
||||||
|
var EventEmitter = require('events').EventEmitter;
|
||||||
|
var RefCounter = require('../tools/ref-counter');
|
||||||
|
var shallowCopy = require('../util').shallowCopy;
|
||||||
|
var isObject = require('../util').isObject;
|
||||||
|
|
||||||
|
util.inherits(HighLevelProducer, Producer);
|
||||||
|
|
||||||
|
// Identity serializer used for both key and value until the user installs
// custom serializers.
var noopSerializer = createSerializer(function (v) { return v; });
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a serializer
|
||||||
|
*
|
||||||
|
* Method simply wraps a serializer provided by a user
|
||||||
|
* so it adds context to the error
|
||||||
|
*
|
||||||
|
* @returns {function} Serialization function
|
||||||
|
*/
|
||||||
|
function createSerializer(serializer) {
  // Wrapper that re-raises serializer failures with the offending value
  // and the serializer attached for context.
  function serializationWrapper(v, cb) {
    try {
      if (cb) {
        return serializer(v, cb);
      }
      return serializer(v);
    } catch (e) {
      var wrappedError = new Error('Could not serialize value: ' + e.message);
      wrappedError.value = v;
      wrappedError.serializer = serializer;
      throw wrappedError;
    }
  }

  // We can check how many parameters the function has and activate the
  // asynchronous operation if the number of parameters it accepts is > 1.
  return {
    apply: serializationWrapper,
    async: serializer.length > 1
  };
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Producer class for sending messages to Kafka in a higher level fashion
|
||||||
|
*
|
||||||
|
* This is the main entry point for writing data to Kafka if you want more
|
||||||
|
* functionality than librdkafka supports out of the box. You
|
||||||
|
* configure this like you do any other client, with a global
|
||||||
|
* configuration and default topic configuration.
|
||||||
|
*
|
||||||
|
* Once you instantiate this object, you need to connect to it first.
|
||||||
|
* This allows you to get the metadata and make sure the connection
|
||||||
|
* can be made before you depend on it. After that, problems with
|
||||||
|
* the connection will by brought down by using poll, which automatically
|
||||||
|
* runs when a transaction is made on the object.
|
||||||
|
*
|
||||||
|
* This has a few restrictions, so it is not for free!
|
||||||
|
*
|
||||||
|
* 1. You may not define opaque tokens
|
||||||
|
* The higher level producer is powered by opaque tokens.
|
||||||
|
* 2. Every message ack will dispatch an event on the node thread.
|
||||||
|
* 3. Will use a ref counter to determine if there are outgoing produces.
|
||||||
|
*
|
||||||
|
* This will return the new object you should use instead when doing your
|
||||||
|
* produce calls
|
||||||
|
*
|
||||||
|
* @param {object} conf - Key value pairs to configure the producer
|
||||||
|
* @param {object} topicConf - Key value pairs to create a default
|
||||||
|
* topic configuration
|
||||||
|
* @extends Producer
|
||||||
|
* @constructor
|
||||||
|
*/
|
||||||
|
function HighLevelProducer(conf, topicConf) {
  // Support calling without `new`.
  if (!(this instanceof HighLevelProducer)) {
    return new HighLevelProducer(conf, topicConf);
  }

  // Force this to be true for the high level producer: delivery reports
  // drive the per-message callbacks wired up below.
  conf = shallowCopy(conf);
  conf.dr_cb = true;

  // producer is an initialized producer object
  // @see NodeKafka::Producer::Init
  Producer.call(this, conf, topicConf);
  var self = this;

  // Add a delivery emitter to the producer
  this._hl = {
    deliveryEmitter: new EventEmitter(),
    messageId: 0,
    // Special logic for polling. We use a reference counter to know when we need
    // to be doing it and when we can stop. This means when we go into fast polling
    // mode we don't need to do multiple calls to poll since they all will yield
    // the same result
    pollingRefTimeout: null,
  };

  // Add the polling ref counter to the class which ensures we poll when we go active.
  // First increment starts a fast (1ms) poll loop; last decrement stops it.
  this._hl.pollingRef = new RefCounter(function() {
    self._hl.pollingRefTimeout = setInterval(function() {
      try {
        self.poll();
      } catch (e) {
        if (!self._isConnected) {
          // If we got disconnected for some reason there is no point
          // in polling anymore
          clearInterval(self._hl.pollingRefTimeout);
        }
      }
    }, 1);
  }, function() {
    clearInterval(self._hl.pollingRefTimeout);
  });

  // Default poll interval. More sophisticated polling is also done in create rule method
  this.setPollInterval(1000);

  // Listen to all delivery reports to propagate elements with a _message_id to the emitter
  this.on('delivery-report', function(err, report) {
    if (report.opaque && report.opaque.__message_id !== undefined) {
      self._hl.deliveryEmitter.emit(report.opaque.__message_id, err, report.offset);
    }
  });

  // Save old produce here since we are making some modifications for it
  this._oldProduce = this.produce;
  this.produce = this._modifiedProduce;

  // Serializer information: noop serializers pass values through untouched
  // until the user installs custom ones.
  this.keySerializer = noopSerializer;
  this.valueSerializer = noopSerializer;
}
|
||||||
|
|
||||||
|
/**
 * Produce a message to Kafka asynchronously.
 *
 * This is the method mainly used in this class. Use it to produce
 * a message to Kafka.
 *
 * When this is sent off, and you receive your callback, the assurances afforded
 * to you will be equal to those provided by your ack level.
 *
 * The key and value are passed through the configured key/value serializers,
 * which may be synchronous, callback-based (serializer.async), or
 * promise-returning; the produce only happens once both have resolved.
 *
 * @param {string} topic - The topic name to produce to.
 * @param {number|null} partition - The partition number to produce to.
 * @param {Buffer|null} message - The message to produce.
 * @param {string} key - The key associated with the message.
 * @param {number|null} timestamp - Timestamp to send with the message.
 * @param {object} headers - A list of custom key value pairs that provide message metadata.
 * @param {function} callback - Callback to call when the delivery report is received.
 * @throws {LibrdKafkaError} - Throws a librdkafka error if it failed.
 * @return {undefined} - Results are delivered exclusively through the
 * callback; this method itself has no return value.
 * @see Producer#produce
 */
HighLevelProducer.prototype._modifiedProduce = function(topic, partition, message, key, timestamp, headers, callback) {
  // headers are optional
  if (arguments.length === 6) {
    callback = headers;
    headers = undefined;
  }

  // Add the message id. The opaque object travels with the message so the
  // delivery report can be matched back to this call via deliveryEmitter.
  var opaque = {
    __message_id: this._hl.messageId++,
  };

  // Hold a polling reference until the delivery report arrives so the
  // producer keeps polling rapidly while a produce is in flight.
  this._hl.pollingRef.increment();

  var self = this;

  // Both start undefined; a produce is only attempted once BOTH have been
  // assigned. NOTE(review): a serializer that resolves to `undefined`
  // (rather than null) would therefore never trigger the produce — confirm
  // serializers always yield a defined value.
  var resolvedSerializedValue;
  var resolvedSerializedKey;
  var calledBack = false;

  // Actually do the produce with new key and value based on deserialized
  // results
  function doProduce(v, k) {
    try {
      var r = self._oldProduce(topic, partition,
        v, k,
        timestamp, opaque, headers);

      // Wait for exactly one delivery report for this message id, then
      // release the polling reference and report back to the caller.
      self._hl.deliveryEmitter.once(opaque.__message_id, function(err, offset) {
        self._hl.pollingRef.decrement();
        setImmediate(function() {
          // Offset must be greater than or equal to 0 otherwise it is a null offset
          // Possibly because we have acks off
          callback(err, offset >= 0 ? offset : null);
        });
      });

      return r;
    } catch (e) {
      callback(e);
    }
  }

  // Fire the produce once both key and value serialization completed.
  function produceIfComplete() {
    if (resolvedSerializedKey !== undefined && resolvedSerializedValue !== undefined) {
      doProduce(resolvedSerializedValue, resolvedSerializedKey);
    }
  }

  // To run on a promise if returned by the serializer
  function finishSerializedValue(v) {
    if (!calledBack) {
      resolvedSerializedValue = v;
      produceIfComplete();
    }
  }

  // To run on a promise if returned by the serializer
  function finishSerializedKey(k) {
    resolvedSerializedKey = k;

    if (!calledBack) {
      produceIfComplete();
    }
  }

  // Failure paths guard on calledBack so the user callback fires at most
  // once even if both serializers fail.
  function failSerializedValue(err) {
    if (!calledBack) {
      calledBack = true;
      callback(err);
    }
  }

  function failSerializedKey(err) {
    if (!calledBack) {
      calledBack = true;
      callback(err);
    }
  }

  // Node-style (err, value) adapters for async serializers.
  function valueSerializerCallback(err, v) {
    if (err) {
      failSerializedValue(err);
    } else {
      finishSerializedValue(v);
    }
  }

  function keySerializerCallback(err, v) {
    if (err) {
      failSerializedKey(err);
    } else {
      finishSerializedKey(v);
    }
  }

  try {
    if (this.valueSerializer.async) {
      // If this is async we need to give it a callback
      this.valueSerializer.apply(message, valueSerializerCallback);
    } else {
      var serializedValue = this.valueSerializer.apply(message);
      // Check if we were returned a promise in order to support promise behavior
      if (serializedValue &&
          typeof serializedValue.then === 'function' &&
          typeof serializedValue.catch === 'function') {
        // This is a promise. We need to hook into its then and catch
        serializedValue.then(finishSerializedValue).catch(failSerializedValue);
      } else {
        resolvedSerializedValue = serializedValue;
      }
    }

    if (this.keySerializer.async) {
      // If this is async we need to give it a callback
      this.keySerializer.apply(key, keySerializerCallback);
    } else {
      var serializedKey = this.keySerializer.apply(key);
      // Check if we were returned a promise in order to support promise behavior
      if (serializedKey &&
          typeof serializedKey.then === 'function' &&
          typeof serializedKey.catch === 'function') {
        // This is a promise. We need to hook into its then and catch
        serializedKey.then(finishSerializedKey).catch(failSerializedKey);
      } else {
        resolvedSerializedKey = serializedKey;
      }
    }

    // Only do the produce here if we are complete. That is, if the key
    // and value have been serialized.
    produceIfComplete();
  } catch (e) {
    // NOTE(review): calledBack is only set inside the setImmediate, so a
    // pending serializer rejection arriving first could invoke the callback
    // twice — confirm whether this ordering is intended.
    setImmediate(function() {
      calledBack = true;
      callback(e);
    });
  }
};
|
||||||
|
|
||||||
|
/**
 * Set the key serializer
 *
 * This allows the key inside the produce call to differ from the key
 * actually produced to kafka. Good if, for example, you want to serialize
 * it to a particular format.
 *
 * @param {function} serializer - Serializer for message keys; wrapped by
 * createSerializer (presumably to normalize sync/callback/promise forms —
 * see that helper for the exact contract).
 */
HighLevelProducer.prototype.setKeySerializer = function(serializer) {
  this.keySerializer = createSerializer(serializer);
};
|
||||||
|
|
||||||
|
/**
 * Set the value serializer
 *
 * This allows the value inside the produce call to differ from the value
 * actually produced to kafka. Good if, for example, you want to serialize
 * it to a particular format.
 *
 * @param {function} serializer - Serializer for message values; wrapped by
 * createSerializer (presumably to normalize sync/callback/promise forms —
 * see that helper for the exact contract).
 */
HighLevelProducer.prototype.setValueSerializer = function(serializer) {
  this.valueSerializer = createSerializer(serializer);
};
|
52
lib/tools/ref-counter.js
Normal file
52
lib/tools/ref-counter.js
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
module.exports = RefCounter;
|
||||||
|
|
||||||
|
/**
 * Reference counter with activity callbacks.
 *
 * Tracks a count of outstanding references and reports edge transitions:
 * onActive fires when the count first rises above zero, and onPassive fires
 * when it drops back to (or below) zero.
 *
 * The producer uses this to begin rapid polling after a produce and to stop
 * once the matching delivery report has been dispatched.
 *
 * @param {function} onActive - Invoked with a shared context object when the
 * counter transitions from idle to active.
 * @param {function} onPassive - Invoked with the same context object when the
 * counter transitions from active back to idle.
 */
function RefCounter(onActive, onPassive) {
  // Shared mutable context handed to both callbacks on every transition.
  this.context = {};
  this.onActive = onActive;
  this.onPassive = onPassive;
  // Start idle with no outstanding references.
  this.currentValue = 0;
  this.isRunning = false;
}

/**
 * Add a reference, firing onActive on the idle -> active edge.
 */
RefCounter.prototype.increment = function() {
  this.currentValue++;

  // Only the transition matters; stay quiet while already running.
  if (this.isRunning || this.currentValue <= 0) {
    return;
  }

  this.isRunning = true;
  this.onActive(this.context);
};

/**
 * Drop a reference, firing onPassive on the active -> idle edge.
 */
RefCounter.prototype.decrement = function() {
  this.currentValue--;

  // Nothing to announce unless we just crossed back down to zero.
  if (!this.isRunning || this.currentValue > 0) {
    return;
  }

  this.isRunning = false;
  this.onPassive(this.context);
};
|
88
lib/topic-partition.js
Normal file
88
lib/topic-partition.js
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Topic = require('./topic');
|
||||||
|
|
||||||
|
module.exports = TopicPartition;
|
||||||
|
|
||||||
|
/**
 * Map an array of topic partition js objects to real topic partition objects.
 *
 * @param {array} array - The array of topic partition raw objects to map to
 * topic partition objects.
 * @return {array} Array of TopicPartition instances.
 */
TopicPartition.map = function(array) {
  return array.map(function(element) {
    return TopicPartition.create(element);
  });
};

/**
 * Take a topic partition javascript object and convert it to the class.
 * The class will automatically convert offset identifiers to special constants
 *
 * @param {object} element - The topic partition raw javascript object.
 * @return {TopicPartition} The validated, decorated topic partition.
 */
TopicPartition.create = function(element) {
  // Just ensure we take something that can have properties. The topic partition
  // class will do the actual validation.
  element = element || {};
  return new TopicPartition(element.topic, element.partition, element.offset);
};

/**
 * Create a topic partition. Just does some validation and decoration
 * on topic partitions provided.
 *
 * Goal is still to behave like a plain javascript object but with validation
 * and potentially some extra methods.
 *
 * @param {string} topic - The topic name. Required.
 * @param {number} partition - The partition number. Required.
 * @param {number|string} [offset] - Offset, either numeric or one of the
 * special strings 'earliest'/'beginning', 'latest'/'end', or 'stored'.
 * Defaults to the stored offset.
 * @throws {TypeError} If topic, partition, or offset is missing or invalid.
 */
function TopicPartition(topic, partition, offset) {
  if (!(this instanceof TopicPartition)) {
    return new TopicPartition(topic, partition, offset);
  }

  // Validate that the elements we are iterating over are actual topic partition
  // js objects. They do not need an offset, but they do need partition
  if (!topic) {
    throw new TypeError('"topic" must be a string and must be set');
  }

  if (partition === null || partition === undefined) {
    // Fixed grammar: this message previously read "must set".
    throw new TypeError('"partition" must be a number and must be set');
  }

  // We can just set topic and partition as they stand.
  this.topic = topic;
  this.partition = partition;

  if (offset === undefined || offset === null) {
    this.offset = Topic.OFFSET_STORED;
  } else if (typeof offset === 'string') {
    switch (offset.toLowerCase()) {
      case 'earliest':
      case 'beginning':
        this.offset = Topic.OFFSET_BEGINNING;
        break;
      case 'latest':
      case 'end':
        this.offset = Topic.OFFSET_END;
        break;
      case 'stored':
        this.offset = Topic.OFFSET_STORED;
        break;
      default:
        // List every accepted alias; the old message omitted
        // 'earliest' and 'latest' even though both are accepted above.
        throw new TypeError('"offset", if provided as a string, must be earliest, beginning, latest, end, or stored.');
    }
  } else if (typeof offset === 'number') {
    this.offset = offset;
  } else {
    throw new TypeError('"offset" must be a special string or number if it is set');
  }
}
|
42
lib/topic.js
Normal file
42
lib/topic.js
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var librdkafka = require('../librdkafka');
|
||||||
|
|
||||||
|
module.exports = Topic;
|
||||||
|
|
||||||
|
var topicKey = 'RdKafka::Topic::';
var topicKeyLength = topicKey.length;

// Take all of the topic special codes from librdkafka and add them
// to the object
// You can find this list in the C++ code at
// https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L1250
for (var key in librdkafka.topic) {
  // Skip anything not namespaced under RdKafka::Topic::. Use the shared
  // topicKey constant (previously a duplicated literal) so the prefix and
  // topicKeyLength can never drift apart; the old comment also wrongly
  // referred to "ErrorCode".
  if (key.indexOf(topicKey) !== 0) {
    continue;
  }

  // Strip the namespace prefix and expose the constant on Topic, e.g.
  // 'RdKafka::Topic::OFFSET_BEGINNING' -> Topic.OFFSET_BEGINNING.
  // Replace/add it if there are any discrepancies.
  var newKey = key.substring(topicKeyLength);
  Topic[newKey] = librdkafka.topic[key];
}
|
||||||
|
|
||||||
|
/**
 * Create a topic. Just returns the string you gave it right now.
 *
 * Looks like a class, but all it does is return the topic name.
 * This is so that one day if there are interface changes that allow
 * different use of topic parameters, we can just add to this constructor and
 * have it return something richer.
 *
 * @param {string} topicName - The name of the topic.
 * @return {string} The topic name, unchanged.
 */
function Topic(topicName) {
  // Nothing richer to build yet; hand the name straight back.
  var name = topicName;
  return name;
}
|
29
lib/util.js
Normal file
29
lib/util.js
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var util = module.exports = {};
|
||||||
|
|
||||||
|
/**
 * Shallow-copy a plain object: own enumerable properties are copied one
 * level deep; nested objects remain shared with the original.
 *
 * @param {*} obj - Value to copy. Non-objects (and null) are returned as-is,
 * since they are already passed by value.
 * @return {*} A new object with the same own enumerable keys, or obj itself
 * when it is not an object.
 */
util.shallowCopy = function (obj) {

  if (!util.isObject(obj)) { return obj; }

  var copy = {};

  for (var k in obj) {
    // Use Object.prototype.hasOwnProperty.call instead of
    // obj.hasOwnProperty so the copy still works for objects that shadow
    // `hasOwnProperty` or were created with a null prototype.
    if (Object.prototype.hasOwnProperty.call(obj, k)) {
      copy[k] = obj[k];
    }
  }

  return copy;
};
|
||||||
|
|
||||||
|
/**
 * Check whether a value is a non-null object.
 *
 * @param {*} obj - Value to test.
 * @return {boolean} True for non-null objects (including arrays), false
 * otherwise. Previously the falsy input itself (null/undefined/0/'') leaked
 * back to the caller; now a real boolean is always returned, which is
 * backward-compatible in every boolean context.
 */
util.isObject = function (obj) {
  return Boolean(obj) && typeof obj === 'object';
};
|
12
librdkafka.js
Normal file
12
librdkafka.js
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
/*
 * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
 *
 * Copyright (c) 2016 Blizzard Entertainment
 *
 * This software may be modified and distributed under the terms
 * of the MIT license. See the LICENSE.txt file for details.
 */

// Load the compiled native addon (built by node-gyp) via the `bindings`
// helper, which locates node-librdkafka.node in the usual build output
// directories.
var kafka = require('bindings')('node-librdkafka');

// Re-export the native module untouched; the JS wrapper classes in lib/
// layer their APIs on top of this object.
module.exports = kafka;
|
57
make_docs.sh
Executable file
57
make_docs.sh
Executable file
@ -0,0 +1,57 @@
|
|||||||
|
#!/bin/bash

# Publish freshly generated jsdoc output to the gh-pages branch.
# Refuses to run with a dirty working tree so the branch switch is safe.

if [[ `git status --porcelain` ]]; then
  # changes
  >&2 echo "You have unstaged changes. Please commit before you run this."
  exit 1
fi

# REPO=git@github.com:Blizzard/node-rdkafka.git
REPO=https://github.com/Blizzard/node-rdkafka.git

# The deploy remote may already exist from a previous run; ignore that.
git remote add deploy "$REPO" 2>/dev/null

# Get the most recent stuff if we don't have it
git fetch deploy gh-pages || exit $?

make docs || exit $?

# Get package name and version and save to variables.
# Use .pop() so both scoped (@scope/name) and unscoped names work;
# the old split("/")[1] yielded "undefined" for unscoped names.
PACKAGE=$(node -pe 'require("./package.json").name.split("/").pop()')
VERSION=$(node -pe 'require("./package.json").version')

# Make a temporary folder
TEMPDIR=$(mktemp -d)

VERSIONDIR="$TEMPDIR/$VERSION"
cp -r docs "$VERSIONDIR"

# Now, checkout the gh-pages, but first get current checked out branch
CURRENT_BRANCH=$(git rev-parse --symbolic-full-name --abbrev-ref HEAD)

COMMIT_MESSAGE=$(git log --pretty='format:%B' -1)
COMMIT_AUTHOR=$(git log --pretty='format:%aN <%aE>' -1)

# Test the exit status of the checkout directly. The previous form,
# `if [[ $(git checkout --quiet ...) ]]`, inspected stdout (always empty
# with --quiet), so a failed checkout was never detected.
if ! git checkout --quiet -b gh-pages deploy/gh-pages; then
  >&2 echo "Could not checkout gh-pages"
  exit 1
fi

rm -rf current
rm -rf "$VERSION"

cp -r "$VERSIONDIR" "$VERSION"
cp -r "$VERSIONDIR" current

git add --all
git commit --author="$COMMIT_AUTHOR" -m "Updated docs for '$COMMIT_MESSAGE'"

rm -rf "$TEMPDIR"

git push "$REPO" gh-pages || exit $?

git checkout "$CURRENT_BRANCH"
|
34
package.json
Normal file
34
package.json
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
{
|
||||||
|
"name": "node-kafka",
|
||||||
|
"version": "v0.12.0",
|
||||||
|
"description": "Node.js bindings for librdkafka",
|
||||||
|
"librdkafka": "1.6.1",
|
||||||
|
"main": "lib/index.js",
|
||||||
|
"author": "sentientgeeks",
|
||||||
|
"scripts": {
|
||||||
|
"configure": "node-gyp configure",
|
||||||
|
"build": "node-gyp build",
|
||||||
|
"test": "make test",
|
||||||
|
"install": "node-gyp rebuild",
|
||||||
|
"prepack": "node ./ci/prepublish.js"
|
||||||
|
},
|
||||||
|
"keywords": [
|
||||||
|
"kafka",
|
||||||
|
"librdkafka"
|
||||||
|
],
|
||||||
|
"devDependencies": {
|
||||||
|
"bluebird": "^3.5.3",
|
||||||
|
"jsdoc": "^3.4.0",
|
||||||
|
"jshint": "^2.10.1",
|
||||||
|
"mocha": "^5.2.0",
|
||||||
|
"node-gyp": "^5.1.0",
|
||||||
|
"toolkit-jsdoc": "^1.0.0"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"bindings": "^1.3.1",
|
||||||
|
"nan": "^2.14.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=6.0.0"
|
||||||
|
}
|
||||||
|
}
|
42
run_docker.sh
Executable file
42
run_docker.sh
Executable file
@ -0,0 +1,42 @@
|
|||||||
|
#!/bin/bash

# Bring up the docker-compose kafka/zookeeper stack and pre-create the
# topics the test suite expects.

COMPOSE_VERSION=$(docker-compose --version)
DOCKER_VERSION=$(docker --version)

# Start the docker compose file
echo "Running docker compose up. Docker version $DOCKER_VERSION. Compose version $COMPOSE_VERSION. "

docker-compose up -d

# Treat any non-zero status as failure; the old check only caught exit
# code 1 and let other failure codes fall through silently.
if [ "$?" -ne 0 ]; then
  echo "Failed to start docker images."
  exit 1
fi

# List of topics to create in container
topics=(
  "test"
  "test2"
  "test3"
  "test4"
  "test5"
  "test6"
)

# Run docker-compose exec to make them
for topic in "${topics[@]}"
do
  echo "Making topic $topic"
  # Retry while the broker is still coming up (exit code 1); any other
  # status code is treated as fatal.
  until docker-compose exec kafka \
    kafka-topics --create --topic "$topic" --partitions 1 --replication-factor 1 --if-not-exists --zookeeper zookeeper:2181
  do
    topic_result="$?"
    if [ "$topic_result" == "1" ]; then
      echo "Bad status code: $topic_result. Trying again."
    else
      # If it is some unknown status code, die.
      exit 1
    fi
  done

done
|
601
src/admin.cc
Normal file
601
src/admin.cc
Normal file
@ -0,0 +1,601 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <math.h>
|
||||||
|
|
||||||
|
#include "src/workers.h"
|
||||||
|
#include "src/admin.h"
|
||||||
|
|
||||||
|
using Nan::FunctionCallbackInfo;
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief AdminClient v8 wrapped object.
|
||||||
|
*
|
||||||
|
* Specializes the connection to wrap a consumer object through compositional
|
||||||
|
* inheritence. Establishes its prototype in node through `Init`
|
||||||
|
*
|
||||||
|
* @sa RdKafka::Handle
|
||||||
|
* @sa NodeKafka::Client
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * Construct an AdminClient as a Connection with no topic configuration.
 * The rd_kafka event queue handle is created lazily in Connect().
 */
AdminClient::AdminClient(Conf* gconfig):
  Connection(gconfig, NULL) {
    rkqu = NULL;
}
|
||||||
|
|
||||||
|
/**
 * Destructor: Disconnect() releases the queue and client handles.
 */
AdminClient::~AdminClient() {
  Disconnect();
}
|
||||||
|
|
||||||
|
/**
 * Establish the underlying handle used for admin operations.
 *
 * Creates an RdKafka::Producer purely as a handle (admin calls go through
 * its c_ptr) and lazily creates the client-level event queue.
 *
 * @return Baton carrying ERR__STATE (and errstr) on failure, or
 *         ERR_NO_ERROR on success.
 */
Baton AdminClient::Connect() {
  std::string errstr;

  {
    // Write lock: m_client is being replaced.
    scoped_shared_write_lock lock(m_connection_lock);
    m_client = RdKafka::Producer::create(m_gconfig, errstr);
  }

  if (!m_client || !errstr.empty()) {
    return Baton(RdKafka::ERR__STATE, errstr);
  }

  // Create the shared queue only once, on first successful connect.
  if (rkqu == NULL) {
    rkqu = rd_kafka_queue_new(m_client->c_ptr());
  }

  return Baton(RdKafka::ERR_NO_ERROR);
}
|
||||||
|
|
||||||
|
/**
 * Tear down the admin handle.
 *
 * Destroys the event queue before deleting the client so the queue never
 * outlives the rd_kafka instance it was created from. Safe to call when
 * already disconnected (no-op).
 *
 * @return Always ERR_NO_ERROR.
 */
Baton AdminClient::Disconnect() {
  if (IsConnected()) {
    scoped_shared_write_lock lock(m_connection_lock);

    if (rkqu != NULL) {
      rd_kafka_queue_destroy(rkqu);
      rkqu = NULL;
    }

    delete m_client;
    m_client = NULL;
  }

  return Baton(RdKafka::ERR_NO_ERROR);
}
|
||||||
|
|
||||||
|
Nan::Persistent<v8::Function> AdminClient::constructor;
|
||||||
|
|
||||||
|
/**
 * Build the v8 function template for AdminClient, attach its prototype
 * methods, and export the constructor on the module. Called once at
 * addon load time.
 */
void AdminClient::Init(v8::Local<v8::Object> exports) {
  Nan::HandleScope scope;

  v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);
  tpl->SetClassName(Nan::New("AdminClient").ToLocalChecked());
  // One internal field to hold the wrapped native pointer.
  tpl->InstanceTemplate()->SetInternalFieldCount(1);

  // Admin client operations
  Nan::SetPrototypeMethod(tpl, "createTopic", NodeCreateTopic);
  Nan::SetPrototypeMethod(tpl, "deleteTopic", NodeDeleteTopic);
  Nan::SetPrototypeMethod(tpl, "createPartitions", NodeCreatePartitions);

  // Connection lifecycle.
  Nan::SetPrototypeMethod(tpl, "connect", NodeConnect);
  Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect);

  // Keep the constructor alive for NewInstance().
  constructor.Reset(
    (tpl->GetFunction(Nan::GetCurrentContext())).ToLocalChecked());
  Nan::Set(exports, Nan::New("AdminClient").ToLocalChecked(),
    tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked());
}
|
||||||
|
|
||||||
|
/**
 * JS constructor entry point: new AdminClient(globalConfig).
 *
 * Validates the argument, builds a Conf from the supplied object, and wraps
 * a new AdminClient around `this`. Throws a JS error (and returns early)
 * on any validation or configuration failure.
 */
void AdminClient::New(const Nan::FunctionCallbackInfo<v8::Value>& info) {
  if (!info.IsConstructCall()) {
    return Nan::ThrowError("non-constructor invocation not supported");
  }

  if (info.Length() < 1) {
    return Nan::ThrowError("You must supply a global configuration");
  }

  if (!info[0]->IsObject()) {
    return Nan::ThrowError("Global configuration data must be specified");
  }

  std::string errstr;

  Conf* gconfig =
    Conf::create(RdKafka::Conf::CONF_GLOBAL,
      (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr);

  if (!gconfig) {
    return Nan::ThrowError(errstr.c_str());
  }

  // Ownership of gconfig passes to the AdminClient (via Connection).
  AdminClient* client = new AdminClient(gconfig);

  // Wrap it
  client->Wrap(info.This());

  // Then there is some weird initialization that happens
  // basically it sets the configuration data
  // we don't need to do that because we lazy load it

  info.GetReturnValue().Set(info.This());
}
|
||||||
|
|
||||||
|
/**
 * Instantiate an AdminClient from native code by invoking the stored JS
 * constructor with a single argument (the global config object).
 *
 * @return The new wrapped instance, escaped into the caller's handle scope.
 */
v8::Local<v8::Object> AdminClient::NewInstance(v8::Local<v8::Value> arg) {
  Nan::EscapableHandleScope scope;

  const unsigned argc = 1;

  v8::Local<v8::Value> argv[argc] = { arg };
  v8::Local<v8::Function> cons = Nan::New<v8::Function>(constructor);
  v8::Local<v8::Object> instance =
    Nan::NewInstance(cons, argc, argv).ToLocalChecked();

  return scope.Escape(instance);
}
|
||||||
|
|
||||||
|
/**
 * Poll for a particular event on a queue.
 *
 * This will keep polling until it gets an event of that type,
 * given the number of tries and a timeout.
 *
 * For total timeouts above 2 seconds the budget is split across several
 * attempts, each waiting twice as long as the previous one, so the overall
 * wait stays close to timeout_ms while early responses return quickly.
 *
 * @param topic_rkqu  Queue to poll; the caller retains ownership.
 * @param event_type  The rd_kafka event type to wait for.
 * @param timeout_ms  Total time budget in milliseconds.
 * @return The matching event (caller must rd_kafka_event_destroy it), or
 *         NULL when no matching event arrived within the budget.
 */
rd_kafka_event_t* PollForEvent(
  rd_kafka_queue_t * topic_rkqu,
  rd_kafka_event_type_t event_type,
  int timeout_ms) {
  // Initiate exponential timeout
  int attempts = 1;
  int exp_timeout_ms = timeout_ms;
  if (timeout_ms > 2000) {
    // measure optimal number of attempts
    attempts = log10(timeout_ms / 1000) / log10(2) + 1;
    // measure initial exponential timeout based on attempts
    exp_timeout_ms = timeout_ms / (pow(2, attempts) - 1);
  }

  rd_kafka_event_t * event_response = nullptr;

  // Poll the event queue until we get it
  do {
    // free previously fetched event; NULL on the first pass
    // (NOTE(review): assumes rd_kafka_event_destroy tolerates NULL —
    // confirm against the librdkafka version in use)
    rd_kafka_event_destroy(event_response);
    // poll and update attempts and exponential timeout
    event_response = rd_kafka_queue_poll(topic_rkqu, exp_timeout_ms);
    attempts = attempts - 1;
    exp_timeout_ms = 2 * exp_timeout_ms;
  } while (
    rd_kafka_event_type(event_response) != event_type &&
    attempts > 0);

  // If this isn't the type of response we want, or if we do not have a response
  // type, bail out with a null
  if (event_response == NULL ||
    rd_kafka_event_type(event_response) != event_type) {
    rd_kafka_event_destroy(event_response);
    return NULL;
  }

  return event_response;
}
|
||||||
|
|
||||||
|
/**
 * Create a single topic synchronously via the librdkafka Admin API.
 *
 * Issues rd_kafka_CreateTopics on a dedicated queue, waits (with the
 * PollForEvent backoff) for the CREATETOPICS result event, and converts
 * any event- or per-topic-level error into a Baton.
 *
 * @param topic       The new-topic description; ownership stays with caller.
 * @param timeout_ms  Total poll budget for the result event.
 * @return Baton with ERR__STATE when disconnected, ERR__TIMED_OUT when no
 *         result arrived, the librdkafka error (plus message when available)
 *         on failure, or ERR_NO_ERROR on success.
 */
Baton AdminClient::CreateTopic(rd_kafka_NewTopic_t* topic, int timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  {
    // Double-checked under the write lock to avoid racing Disconnect().
    scoped_shared_write_lock lock(m_connection_lock);
    if (!IsConnected()) {
      return Baton(RdKafka::ERR__STATE);
    }

    // Make admin options to establish that we are creating topics
    rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
      m_client->c_ptr(), RD_KAFKA_ADMIN_OP_CREATETOPICS);

    // Create queue just for this operation
    rd_kafka_queue_t * topic_rkqu = rd_kafka_queue_new(m_client->c_ptr());

    rd_kafka_CreateTopics(m_client->c_ptr(), &topic, 1, options, topic_rkqu);

    // Poll for an event by type in that queue
    rd_kafka_event_t * event_response = PollForEvent(
      topic_rkqu,
      RD_KAFKA_EVENT_CREATETOPICS_RESULT,
      timeout_ms);

    // Destroy the queue since we are done with it.
    rd_kafka_queue_destroy(topic_rkqu);

    // Destroy the options we just made because we polled already
    rd_kafka_AdminOptions_destroy(options);

    // If we got no response from that operation, this is a failure
    // likely due to time out
    if (event_response == NULL) {
      return Baton(RdKafka::ERR__TIMED_OUT);
    }

    // Now we can get the error code from the event
    if (rd_kafka_event_error(event_response)) {
      // If we had a special error code, get out of here with it
      const rd_kafka_resp_err_t errcode = rd_kafka_event_error(event_response);
      rd_kafka_event_destroy(event_response);
      return Baton(static_cast<RdKafka::ErrorCode>(errcode));
    }

    // get the created results
    const rd_kafka_CreateTopics_result_t * create_topic_results =
      rd_kafka_event_CreateTopics_result(event_response);

    size_t created_topic_count;
    const rd_kafka_topic_result_t **restopics = rd_kafka_CreateTopics_result_topics(  // NOLINT
      create_topic_results,
      &created_topic_count);

    // Surface the first per-topic error, copying the message before the
    // event (which owns the string) is destroyed.
    for (int i = 0 ; i < static_cast<int>(created_topic_count) ; i++) {
      const rd_kafka_topic_result_t *terr = restopics[i];
      const rd_kafka_resp_err_t errcode = rd_kafka_topic_result_error(terr);
      const char *errmsg = rd_kafka_topic_result_error_string(terr);

      if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) {
        if (errmsg) {
          const std::string errormsg = std::string(errmsg);
          rd_kafka_event_destroy(event_response);
          return Baton(static_cast<RdKafka::ErrorCode>(errcode), errormsg);  // NOLINT
        } else {
          rd_kafka_event_destroy(event_response);
          return Baton(static_cast<RdKafka::ErrorCode>(errcode));
        }
      }
    }

    rd_kafka_event_destroy(event_response);
    return Baton(RdKafka::ERR_NO_ERROR);
  }
}
|
||||||
|
|
||||||
|
/**
 * Delete a single topic synchronously via the librdkafka Admin API.
 *
 * Mirrors CreateTopic: issues rd_kafka_DeleteTopics on a dedicated queue,
 * waits for the DELETETOPICS result event, and converts event- or
 * per-topic-level errors into a Baton.
 *
 * @param topic       The delete-topic description; ownership stays with caller.
 * @param timeout_ms  Total poll budget for the result event.
 * @return Baton with ERR__STATE when disconnected, ERR__TIMED_OUT when no
 *         result arrived, the librdkafka error on failure, or ERR_NO_ERROR.
 */
Baton AdminClient::DeleteTopic(rd_kafka_DeleteTopic_t* topic, int timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  {
    // Double-checked under the write lock to avoid racing Disconnect().
    scoped_shared_write_lock lock(m_connection_lock);
    if (!IsConnected()) {
      return Baton(RdKafka::ERR__STATE);
    }

    // Make admin options to establish that we are deleting topics
    rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
      m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETETOPICS);

    // Create queue just for this operation.
    // May be worth making a "scoped queue" class or something like a lock
    // for RAII
    rd_kafka_queue_t * topic_rkqu = rd_kafka_queue_new(m_client->c_ptr());

    rd_kafka_DeleteTopics(m_client->c_ptr(), &topic, 1, options, topic_rkqu);

    // Poll for an event by type in that queue
    rd_kafka_event_t * event_response = PollForEvent(
      topic_rkqu,
      RD_KAFKA_EVENT_DELETETOPICS_RESULT,
      timeout_ms);

    // Destroy the queue since we are done with it.
    rd_kafka_queue_destroy(topic_rkqu);

    // Destroy the options we just made because we polled already
    rd_kafka_AdminOptions_destroy(options);

    // If we got no response from that operation, this is a failure
    // likely due to time out
    if (event_response == NULL) {
      return Baton(RdKafka::ERR__TIMED_OUT);
    }

    // Now we can get the error code from the event
    if (rd_kafka_event_error(event_response)) {
      // If we had a special error code, get out of here with it
      const rd_kafka_resp_err_t errcode = rd_kafka_event_error(event_response);
      rd_kafka_event_destroy(event_response);
      return Baton(static_cast<RdKafka::ErrorCode>(errcode));
    }

    // get the created results
    const rd_kafka_DeleteTopics_result_t * delete_topic_results =
      rd_kafka_event_DeleteTopics_result(event_response);

    size_t deleted_topic_count;
    const rd_kafka_topic_result_t **restopics = rd_kafka_DeleteTopics_result_topics(  // NOLINT
      delete_topic_results,
      &deleted_topic_count);

    // Surface the first per-topic error, if any.
    for (int i = 0 ; i < static_cast<int>(deleted_topic_count) ; i++) {
      const rd_kafka_topic_result_t *terr = restopics[i];
      const rd_kafka_resp_err_t errcode = rd_kafka_topic_result_error(terr);

      if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) {
        rd_kafka_event_destroy(event_response);
        return Baton(static_cast<RdKafka::ErrorCode>(errcode));
      }
    }

    rd_kafka_event_destroy(event_response);
    return Baton(RdKafka::ERR_NO_ERROR);
  }
}
|
||||||
|
|
||||||
|
/**
 * Increase the partition count of a topic via the librdkafka Admin API.
 *
 * Blocks the calling (worker) thread until the broker answers or the
 * timeout elapses.
 *
 * @param partitions  New-partitions request handle.
 * @param timeout_ms  Maximum time to wait for the result event, in ms.
 * @return Baton with ERR_NO_ERROR on success, ERR__STATE when not
 *         connected, ERR__TIMED_OUT when no event arrived, or the broker
 *         error code (plus message when available) on failure.
 */
Baton AdminClient::CreatePartitions(
  rd_kafka_NewPartitions_t* partitions,
  int timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  {
    scoped_shared_write_lock lock(m_connection_lock);
    if (!IsConnected()) {
      return Baton(RdKafka::ERR__STATE);
    }

    // Admin options scoped to the CREATEPARTITIONS operation.
    rd_kafka_AdminOptions_t *admin_opts = rd_kafka_AdminOptions_new(
      m_client->c_ptr(), RD_KAFKA_ADMIN_OP_CREATEPARTITIONS);

    // Dedicated queue for this single request so we only ever observe our
    // own result event. (Could become a "scoped queue" RAII helper.)
    rd_kafka_queue_t * op_queue = rd_kafka_queue_new(m_client->c_ptr());

    rd_kafka_CreatePartitions(m_client->c_ptr(),
      &partitions, 1, admin_opts, op_queue);

    // Wait for the matching result event on the private queue.
    rd_kafka_event_t * result_event = PollForEvent(
      op_queue,
      RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT,
      timeout_ms);

    // Queue and options are no longer needed once the poll returns.
    rd_kafka_queue_destroy(op_queue);
    rd_kafka_AdminOptions_destroy(admin_opts);

    // No event at all means the broker never answered in time.
    if (result_event == NULL) {
      return Baton(RdKafka::ERR__TIMED_OUT);
    }

    // Request-level error (e.g. transport failure)?
    if (rd_kafka_event_error(result_event)) {
      const rd_kafka_resp_err_t errcode = rd_kafka_event_error(result_event);
      rd_kafka_event_destroy(result_event);
      return Baton(static_cast<RdKafka::ErrorCode>(errcode));
    }

    // Inspect each per-topic result for failures.
    const rd_kafka_CreatePartitions_result_t * op_result =
      rd_kafka_event_CreatePartitions_result(result_event);

    size_t result_topic_count;
    const rd_kafka_topic_result_t **topic_results = rd_kafka_CreatePartitions_result_topics(  // NOLINT
      op_result,
      &result_topic_count);

    for (int idx = 0 ; idx < static_cast<int>(result_topic_count) ; idx++) {  // NOLINT
      const rd_kafka_topic_result_t *topic_result = topic_results[idx];
      const rd_kafka_resp_err_t errcode =
        rd_kafka_topic_result_error(topic_result);
      const char *errmsg = rd_kafka_topic_result_error_string(topic_result);

      if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) {
        if (errmsg) {
          // Copy the message before destroying the event that owns it.
          const std::string errormsg = std::string(errmsg);
          rd_kafka_event_destroy(result_event);
          return Baton(static_cast<RdKafka::ErrorCode>(errcode), errormsg);  // NOLINT
        } else {
          rd_kafka_event_destroy(result_event);
          return Baton(static_cast<RdKafka::ErrorCode>(errcode));
        }
      }
    }

    rd_kafka_event_destroy(result_event);
    return Baton(RdKafka::ERR_NO_ERROR);
  }
}
|
||||||
|
|
||||||
|
/**
 * Begin forwarding librdkafka events to JS: start listening on the global
 * config and activate the event-callback dispatcher.
 */
void AdminClient::ActivateDispatchers() {
  // Global config events.
  m_gconfig->listen();

  // Topic (non-global) config is not wired up here yet:
  // tconfig->listen();

  // NOTE(review): candidate for config-based dispatcher management.
  m_event_cb.dispatcher.Activate();
}
|
||||||
|
/**
 * Stop forwarding librdkafka events to JS (inverse of ActivateDispatchers).
 */
void AdminClient::DeactivateDispatchers() {
  // Stop the global config dispatcher.
  m_gconfig->stop();

  // And the event-callback dispatcher.
  m_event_cb.dispatcher.Deactivate();
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @section
|
||||||
|
* C++ Exported prototype functions
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * JS binding: connect the underlying admin client.
 * Returns the numeric RdKafka error code; the JS layer converts a non-zero
 * code into a richer error object.
 */
NAN_METHOD(AdminClient::NodeConnect) {
  Nan::HandleScope scope;

  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  Baton result = client->Connect();
  // Hand the raw code back so JS can decide whether/how to throw.
  return info.GetReturnValue().Set(
    Nan::New<v8::Number>(static_cast<int>(result.err())));
}
|
||||||
|
|
||||||
|
/**
 * JS binding: disconnect the underlying admin client.
 * Returns the numeric RdKafka error code; the JS layer converts a non-zero
 * code into a richer error object.
 */
NAN_METHOD(AdminClient::NodeDisconnect) {
  Nan::HandleScope scope;

  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  Baton result = client->Disconnect();
  // Hand the raw code back so JS can decide whether/how to throw.
  return info.GetReturnValue().Set(
    Nan::New<v8::Number>(static_cast<int>(result.err())));
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create topic
|
||||||
|
*/
|
||||||
|
/**
 * JS binding: create a topic.
 *
 * Expected arguments: (topicObject, timeoutMs, callback). Throws a JS
 * error on bad arguments; otherwise queues the work on the libuv thread
 * pool and returns null immediately — the callback receives the result.
 */
NAN_METHOD(AdminClient::NodeCreateTopic) {
  Nan::HandleScope scope;

  // The callback is mandatory; it carries the async result back to JS.
  if (info.Length() < 3 || !info[2]->IsFunction()) {
    return Nan::ThrowError("Need to specify a callback");
  }

  if (!info[1]->IsNumber()) {
    return Nan::ThrowError("Must provide 'timeout'");
  }

  // Wrap the JS function so it survives until the worker completes.
  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);
  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  int timeout = Nan::To<int32_t>(info[1]).FromJust();

  // Translate the JS topic descriptor into a librdkafka NewTopic handle.
  std::string errstr;
  rd_kafka_NewTopic_t* topic = Conversion::Admin::FromV8TopicObject(
    info[0].As<v8::Object>(), errstr);

  if (topic == NULL) {
    Nan::ThrowError(errstr.c_str());
    return;
  }

  // Hand the request off to a worker thread.
  Nan::AsyncQueueWorker(
    new Workers::AdminClientCreateTopic(callback, client, topic, timeout));

  return info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete topic
|
||||||
|
*/
|
||||||
|
/**
 * JS binding: delete a topic.
 *
 * Expected arguments: (topicName, timeoutMs, callback). Throws a JS error
 * on bad arguments; otherwise queues the work on the libuv thread pool and
 * returns null immediately — the callback receives the result.
 */
NAN_METHOD(AdminClient::NodeDeleteTopic) {
  Nan::HandleScope scope;

  // The callback is mandatory; it carries the async result back to JS.
  if (info.Length() < 3 || !info[2]->IsFunction()) {
    return Nan::ThrowError("Need to specify a callback");
  }

  if (!info[1]->IsNumber() || !info[0]->IsString()) {
    return Nan::ThrowError("Must provide 'timeout', and 'topicName'");
  }

  // Wrap the JS function so it survives until the worker completes.
  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);
  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  // Topic name to delete.
  std::string topic_name = Util::FromV8String(
    Nan::To<v8::String>(info[0]).ToLocalChecked());

  int timeout = Nan::To<int32_t>(info[1]).FromJust();

  // Build the librdkafka delete-topic request.
  rd_kafka_DeleteTopic_t* topic = rd_kafka_DeleteTopic_new(
    topic_name.c_str());

  // Hand the request off to a worker thread.
  Nan::AsyncQueueWorker(
    new Workers::AdminClientDeleteTopic(callback, client, topic, timeout));

  return info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Create partitions
|
||||||
|
*/
|
||||||
|
/**
 * JS binding: grow a topic to a new total partition count.
 *
 * Expected arguments: (topicName, totalPartitions, timeoutMs, callback).
 * Throws a JS error on bad arguments or when librdkafka rejects the
 * request; otherwise queues the work on the libuv thread pool and returns
 * null immediately — the callback receives the result.
 *
 * Fix: the error buffer was heap-allocated with malloc and never freed,
 * leaking 100 bytes on every call (success, failure, and throw paths
 * alike). A stack buffer needs no cleanup.
 */
NAN_METHOD(AdminClient::NodeCreatePartitions) {
  Nan::HandleScope scope;

  if (info.Length() < 4) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a callback");
  }

  if (!info[3]->IsFunction()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a callback 2");
  }

  if (!info[2]->IsNumber() || !info[1]->IsNumber() || !info[0]->IsString()) {
    return Nan::ThrowError(
      "Must provide 'totalPartitions', 'timeout', and 'topicName'");
  }

  // Create the final callback object; it survives until the worker runs.
  v8::Local<v8::Function> cb = info[3].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);
  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  // Get the timeout
  int timeout = Nan::To<int32_t>(info[2]).FromJust();

  // Get the total number of desired partitions
  int partition_total_count = Nan::To<int32_t>(info[1]).FromJust();

  // Get the topic name from the string
  std::string topic_name = Util::FromV8String(
    Nan::To<v8::String>(info[0]).ToLocalChecked());

  // Stack-allocated error buffer — automatically reclaimed, unlike the
  // previous malloc'd buffer which was never freed.
  char errbuf[100];

  // Create the new partitions request
  rd_kafka_NewPartitions_t* new_partitions = rd_kafka_NewPartitions_new(
    topic_name.c_str(), partition_total_count, errbuf, sizeof(errbuf));

  // If librdkafka rejected the request parameters, surface its message.
  if (new_partitions == NULL) {
    return Nan::ThrowError(errbuf);
  }

  // Queue up dat work
  Nan::AsyncQueueWorker(new Workers::AdminClientCreatePartitions(
    callback, client, new_partitions, timeout));

  return info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
77
src/admin.h
Normal file
77
src/admin.h
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_ADMIN_H_
|
||||||
|
#define SRC_ADMIN_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <uv.h>
|
||||||
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
#include "rdkafka.h" // NOLINT
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/connection.h"
|
||||||
|
#include "src/callbacks.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * @brief AdminClient v8 wrapped object.
 *
 * Specializes the connection to wrap an admin client through compositional
 * inheritance. Establishes its prototype in node through `Init`
 *
 * @sa RdKafka::Handle
 * @sa NodeKafka::Client
 */

class AdminClient : public Connection {
 public:
  // Install the class and its prototype methods on the module exports.
  static void Init(v8::Local<v8::Object>);
  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);

  // Start/stop forwarding librdkafka events to registered JS callbacks.
  void ActivateDispatchers();
  void DeactivateDispatchers();

  Baton Connect();
  Baton Disconnect();

  // Blocking admin operations; each waits up to timeout_ms for the broker
  // result and returns the outcome in a Baton.
  Baton CreateTopic(rd_kafka_NewTopic_t* topic, int timeout_ms);
  Baton DeleteTopic(rd_kafka_DeleteTopic_t* topic, int timeout_ms);
  Baton CreatePartitions(rd_kafka_NewPartitions_t* topic, int timeout_ms);
  // Baton AlterConfig(rd_kafka_NewTopic_t* topic, int timeout_ms);
  // Baton DescribeConfig(rd_kafka_NewTopic_t* topic, int timeout_ms);

 protected:
  static Nan::Persistent<v8::Function> constructor;
  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);

  explicit AdminClient(Conf* globalConfig);
  ~AdminClient();

  // Underlying librdkafka queue handle.
  rd_kafka_queue_t* rkqu;

 private:
  // Node methods
  // static NAN_METHOD(NodeValidateTopic);
  static NAN_METHOD(NodeCreateTopic);
  static NAN_METHOD(NodeDeleteTopic);
  static NAN_METHOD(NodeCreatePartitions);

  static NAN_METHOD(NodeConnect);
  static NAN_METHOD(NodeDisconnect);
};
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_ADMIN_H_
|
94
src/binding.cc
Normal file
94
src/binding.cc
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include "src/binding.h"
|
||||||
|
|
||||||
|
using NodeKafka::Producer;
|
||||||
|
using NodeKafka::KafkaConsumer;
|
||||||
|
using NodeKafka::AdminClient;
|
||||||
|
using NodeKafka::Topic;
|
||||||
|
|
||||||
|
using node::AtExit;
|
||||||
|
using RdKafka::ErrorCode;
|
||||||
|
|
||||||
|
/**
 * Process-exit hook: give librdkafka up to five seconds to decommission.
 * Not strictly required, but letting it release its resources before exit
 * keeps memory profilers such as valgrind from reporting leaks.
 */
static void RdKafkaCleanup(void*) {  // NOLINT
  RdKafka::wait_destroyed(5000);
}
|
||||||
|
|
||||||
|
/**
 * JS binding: translate a numeric librdkafka error code into its
 * human-readable string.
 */
NAN_METHOD(NodeRdKafkaErr2Str) {
  const int raw_code = Nan::To<int>(info[0]).FromJust();

  // Cast the raw integer to the enum librdkafka understands.
  const std::string text =
    RdKafka::err2str(static_cast<RdKafka::ErrorCode>(raw_code));

  info.GetReturnValue().Set(Nan::New<v8::String>(text).ToLocalChecked());
}
|
||||||
|
|
||||||
|
/**
 * JS binding: report the feature list librdkafka was built with
 * ("builtin.features"), or undefined when the lookup fails.
 */
NAN_METHOD(NodeRdKafkaBuildInFeatures) {
  RdKafka::Conf * conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

  std::string features;
  if (RdKafka::Conf::CONF_OK == conf->get("builtin.features", features)) {
    info.GetReturnValue().Set(Nan::New<v8::String>(features).ToLocalChecked());
  } else {
    info.GetReturnValue().Set(Nan::Undefined());
  }

  // The temporary config object is ours to free.
  delete conf;
}
|
||||||
|
|
||||||
|
/**
 * Export librdkafka topic/offset sentinel constants plus the err2str and
 * features helper functions on the module's exports object.
 */
void ConstantsInit(v8::Local<v8::Object> exports) {
  v8::Local<v8::Object> topicConstants = Nan::New<v8::Object>();

  // Topic/offset sentinel values from RdKafka.
  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::PARTITION_UA);
  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_BEGINNING);
  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_END);
  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_STORED);
  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_INVALID);

  Nan::Set(exports, Nan::New("topic").ToLocalChecked(), topicConstants);

  // Helper functions exposed directly on the exports object.
  Nan::Set(exports, Nan::New("err2str").ToLocalChecked(),
    Nan::GetFunction(Nan::New<v8::FunctionTemplate>(NodeRdKafkaErr2Str)).ToLocalChecked());  // NOLINT

  Nan::Set(exports, Nan::New("features").ToLocalChecked(),
    Nan::GetFunction(Nan::New<v8::FunctionTemplate>(NodeRdKafkaBuildInFeatures)).ToLocalChecked());  // NOLINT
}
|
||||||
|
|
||||||
|
// Module entry point: registers the librdkafka exit hook and installs the
// exported classes, constants, and the linked librdkafka version string on
// the module's exports object.
void Init(v8::Local<v8::Object> exports, v8::Local<v8::Value> m_, void* v_) {
// Two AtExit signatures are selected by Node version here — presumably the
// Environment-taking overload is required on newer Node; confirm against
// the node.h headers for the supported range.
#if NODE_MAJOR_VERSION <= 9 || (NODE_MAJOR_VERSION == 10 && NODE_MINOR_VERSION <= 15)
  AtExit(RdKafkaCleanup);
#else
  v8::Local<v8::Context> context = Nan::GetCurrentContext();
  node::Environment* env = node::GetCurrentEnvironment(context);
  AtExit(env, RdKafkaCleanup, NULL);
#endif
  KafkaConsumer::Init(exports);
  Producer::Init(exports);
  AdminClient::Init(exports);
  Topic::Init(exports);
  ConstantsInit(exports);

  // Expose the version of the librdkafka we were linked against.
  Nan::Set(exports, Nan::New("librdkafkaVersion").ToLocalChecked(),
      Nan::New(RdKafka::version_str().c_str()).ToLocalChecked());
}
|
||||||
|
|
||||||
|
NODE_MODULE(kafka, Init)
|
25
src/binding.h
Normal file
25
src/binding.h
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_BINDING_H_
|
||||||
|
#define SRC_BINDING_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <string>
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/errors.h"
|
||||||
|
#include "src/config.h"
|
||||||
|
#include "src/connection.h"
|
||||||
|
#include "src/kafka-consumer.h"
|
||||||
|
#include "src/producer.h"
|
||||||
|
#include "src/topic.h"
|
||||||
|
#include "src/admin.h"
|
||||||
|
|
||||||
|
#endif // SRC_BINDING_H_
|
612
src/callbacks.cc
Normal file
612
src/callbacks.cc
Normal file
@ -0,0 +1,612 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <algorithm>
|
||||||
|
|
||||||
|
#include "src/callbacks.h"
|
||||||
|
#include "src/kafka-consumer.h"
|
||||||
|
|
||||||
|
using v8::Local;
|
||||||
|
using v8::Value;
|
||||||
|
using v8::Object;
|
||||||
|
using v8::String;
|
||||||
|
using v8::Array;
|
||||||
|
using v8::Number;
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
namespace Callbacks {
|
||||||
|
|
||||||
|
// Convert a list of (topic, partition[, offset]) records into a JS array
// of plain objects. Offsets below zero are omitted from the object.
v8::Local<v8::Array> TopicPartitionListToV8Array(
  std::vector<event_topic_partition_t> parts) {
  v8::Local<v8::Array> result = Nan::New<v8::Array>();

  for (size_t idx = 0; idx < parts.size(); idx++) {
    const event_topic_partition_t &part = parts[idx];
    v8::Local<v8::Object> entry = Nan::New<v8::Object>();

    Nan::Set(entry, Nan::New("topic").ToLocalChecked(),
             Nan::New<v8::String>(part.topic.c_str()).ToLocalChecked());
    Nan::Set(entry, Nan::New("partition").ToLocalChecked(),
             Nan::New<v8::Number>(part.partition));

    // Only attach an offset when it is meaningful (non-negative).
    if (part.offset >= 0) {
      Nan::Set(entry, Nan::New("offset").ToLocalChecked(),
               Nan::New<v8::Number>(part.offset));
    }

    Nan::Set(result, idx, entry);
  }

  return result;
}
|
||||||
|
|
||||||
|
// A dispatcher starts with no libuv handle; one is allocated lazily in
// Activate(). The mutex guards state shared with librdkafka threads.
Dispatcher::Dispatcher() {
  uv_mutex_init(&async_lock);
  async = NULL;
}
|
||||||
|
|
||||||
|
/**
 * Release all persistent JS callback handles and the mutex.
 *
 * Fix: the previous implementation returned early when the callback list
 * was empty, skipping uv_mutex_destroy and leaking the mutex created in
 * the constructor. The mutex is now destroyed unconditionally.
 */
Dispatcher::~Dispatcher() {
  // Reset every retained JS function handle (no-op when empty).
  for (size_t i = 0; i < callbacks.size(); i++) {
    callbacks[i].Reset();
  }

  // Always destroy the mutex paired with uv_mutex_init in the constructor.
  uv_mutex_destroy(&async_lock);
}
|
||||||
|
|
||||||
|
// Only run this if we aren't already listening
|
||||||
|
void Dispatcher::Activate() {
|
||||||
|
if (!async) {
|
||||||
|
async = new uv_async_t;
|
||||||
|
uv_async_init(uv_default_loop(), async, AsyncMessage_);
|
||||||
|
|
||||||
|
async->data = this;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should be able to run this regardless of whether it is active or not
|
||||||
|
void Dispatcher::Deactivate() {
|
||||||
|
if (async) {
|
||||||
|
uv_close(reinterpret_cast<uv_handle_t*>(async), NULL);
|
||||||
|
async = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Dispatcher::HasCallbacks() {
|
||||||
|
return callbacks.size() > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Dispatcher::Execute() {
|
||||||
|
if (async) {
|
||||||
|
uv_async_send(async);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Dispatcher::Dispatch(const int _argc, Local<Value> _argv[]) {
|
||||||
|
// This should probably be an array of v8 values
|
||||||
|
if (!HasCallbacks()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (size_t i=0; i < callbacks.size(); i++) {
|
||||||
|
v8::Local<v8::Function> f = Nan::New<v8::Function>(callbacks[i]);
|
||||||
|
Nan::Callback cb(f);
|
||||||
|
cb.Call(_argc, _argv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retain a JS function in a copyable persistent handle so it survives GC
// for as long as it stays registered.
void Dispatcher::AddCallback(const v8::Local<v8::Function> &cb) {
  Nan::Persistent<v8::Function,
                  Nan::CopyablePersistentTraits<v8::Function> > handle(cb);
  callbacks.push_back(handle);
}

// Release and remove the first registered handle matching `cb`.
void Dispatcher::RemoveCallback(const v8::Local<v8::Function> &cb) {
  for (size_t idx = 0; idx < callbacks.size(); idx++) {
    if (callbacks[idx] == cb) {
      callbacks[idx].Reset();
      callbacks.erase(callbacks.begin() + idx);
      break;
    }
  }
}
|
||||||
|
|
||||||
|
/**
 * Snapshot an RdKafka::Event into plain members so the data can cross
 * thread boundaries (the RdKafka::Event is only valid inside event_cb).
 *
 * Fix: the event type was read and assigned twice — once directly and once
 * inside `switch (type = event.type())`. It is now read exactly once.
 */
event_t::event_t(const RdKafka::Event &event) {
  message = "";
  fac = "";

  type = event.type();

  switch (type) {
    case RdKafka::Event::EVENT_ERROR:
      message = RdKafka::err2str(event.err());
      break;
    case RdKafka::Event::EVENT_STATS:
      message = event.str();
      break;
    case RdKafka::Event::EVENT_LOG:
      severity = event.severity();
      fac = event.fac();
      message = event.str();
      break;
    case RdKafka::Event::EVENT_THROTTLE:
      message = RdKafka::err2str(event.err());
      throttle_time = event.throttle_time();
      broker_name = event.broker_name();
      broker_id = static_cast<int>(event.broker_id());
      break;
    default:
      message = event.str();
      break;
  }
}
event_t::~event_t() {}
|
||||||
|
|
||||||
|
// Event callback
|
||||||
|
Event::Event():
|
||||||
|
dispatcher() {}
|
||||||
|
|
||||||
|
Event::~Event() {}
|
||||||
|
|
||||||
|
void Event::event_cb(RdKafka::Event &event) {
|
||||||
|
// Second parameter is going to be an object with properties to
|
||||||
|
// represent the others.
|
||||||
|
|
||||||
|
if (!dispatcher.HasCallbacks()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
event_t e(event);
|
||||||
|
|
||||||
|
dispatcher.Add(e);
|
||||||
|
dispatcher.Execute();
|
||||||
|
}
|
||||||
|
|
||||||
|
EventDispatcher::EventDispatcher() {}
|
||||||
|
EventDispatcher::~EventDispatcher() {}
|
||||||
|
|
||||||
|
void EventDispatcher::Add(const event_t &e) {
|
||||||
|
scoped_mutex_lock lock(async_lock);
|
||||||
|
events.push_back(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
void EventDispatcher::Flush() {
|
||||||
|
Nan::HandleScope scope;
|
||||||
|
// Iterate through each of the currently stored events
|
||||||
|
// generate a callback object for each, setting to the members
|
||||||
|
// then
|
||||||
|
if (events.size() < 1) return;
|
||||||
|
|
||||||
|
const unsigned int argc = 2;
|
||||||
|
|
||||||
|
std::vector<event_t> _events;
|
||||||
|
{
|
||||||
|
scoped_mutex_lock lock(async_lock);
|
||||||
|
events.swap(_events);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (size_t i=0; i < _events.size(); i++) {
|
||||||
|
Local<Value> argv[argc] = {};
|
||||||
|
Local<Object> jsobj = Nan::New<Object>();
|
||||||
|
|
||||||
|
switch (_events[i].type) {
|
||||||
|
case RdKafka::Event::EVENT_ERROR:
|
||||||
|
argv[0] = Nan::New("error").ToLocalChecked();
|
||||||
|
argv[1] = Nan::Error(_events[i].message.c_str());
|
||||||
|
|
||||||
|
// if (event->err() == RdKafka::ERR__ALL_BROKERS_DOWN). Stop running
|
||||||
|
// This may be better suited to the node side of things
|
||||||
|
break;
|
||||||
|
case RdKafka::Event::EVENT_STATS:
|
||||||
|
argv[0] = Nan::New("stats").ToLocalChecked();
|
||||||
|
|
||||||
|
Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
|
||||||
|
Nan::New<String>(_events[i].message.c_str()).ToLocalChecked());
|
||||||
|
|
||||||
|
break;
|
||||||
|
case RdKafka::Event::EVENT_LOG:
|
||||||
|
argv[0] = Nan::New("log").ToLocalChecked();
|
||||||
|
|
||||||
|
Nan::Set(jsobj, Nan::New("severity").ToLocalChecked(),
|
||||||
|
Nan::New(_events[i].severity));
|
||||||
|
Nan::Set(jsobj, Nan::New("fac").ToLocalChecked(),
|
||||||
|
Nan::New(_events[i].fac.c_str()).ToLocalChecked());
|
||||||
|
Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
|
||||||
|
Nan::New(_events[i].message.c_str()).ToLocalChecked());
|
||||||
|
|
||||||
|
break;
|
||||||
|
case RdKafka::Event::EVENT_THROTTLE:
|
||||||
|
argv[0] = Nan::New("throttle").ToLocalChecked();
|
||||||
|
|
||||||
|
Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
|
||||||
|
Nan::New(_events[i].message.c_str()).ToLocalChecked());
|
||||||
|
|
||||||
|
Nan::Set(jsobj, Nan::New("throttleTime").ToLocalChecked(),
|
||||||
|
Nan::New(_events[i].throttle_time));
|
||||||
|
Nan::Set(jsobj, Nan::New("brokerName").ToLocalChecked(),
|
||||||
|
Nan::New(_events[i].broker_name).ToLocalChecked());
|
||||||
|
Nan::Set(jsobj, Nan::New("brokerId").ToLocalChecked(),
|
||||||
|
Nan::New<Number>(_events[i].broker_id));
|
||||||
|
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
argv[0] = Nan::New("event").ToLocalChecked();
|
||||||
|
|
||||||
|
Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
|
||||||
|
Nan::New(events[i].message.c_str()).ToLocalChecked());
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (_events[i].type != RdKafka::Event::EVENT_ERROR) {
|
||||||
|
// error would be assigned already
|
||||||
|
argv[1] = jsobj;
|
||||||
|
}
|
||||||
|
|
||||||
|
Dispatch(argc, argv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
DeliveryReportDispatcher::DeliveryReportDispatcher() {}
|
||||||
|
DeliveryReportDispatcher::~DeliveryReportDispatcher() {}
|
||||||
|
|
||||||
|
size_t DeliveryReportDispatcher::Add(const DeliveryReport &e) {
|
||||||
|
scoped_mutex_lock lock(async_lock);
|
||||||
|
events.push_back(e);
|
||||||
|
return events.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drain up to 100 queued delivery reports and dispatch each to JS as
// (error|null, reportObject). Runs on the main (V8) thread; when more than
// 100 reports were queued it re-arms itself via Execute() so the remainder
// is flushed on a later loop iteration without starving the event loop.
void DeliveryReportDispatcher::Flush() {
  Nan::HandleScope scope;

  const unsigned int argc = 2;

  size_t outstanding_event_count = 0;
  std::vector<DeliveryReport> events_list;
  {
    // Move at most 100 reports out under the lock; the JS dispatch below
    // happens unlocked so producer threads are not blocked.
    scoped_mutex_lock lock(async_lock);
    outstanding_event_count = events.size();
    const size_t flush_count = std::min<size_t>(outstanding_event_count, 100UL);
    events_list.reserve(flush_count);
    for (size_t i = 0; i < flush_count; i++) {
      events_list.emplace_back(std::move(events.front()));
      events.pop_front();
    }
  }

  for (size_t i = 0; i < events_list.size(); i++) {
    v8::Local<v8::Value> argv[argc] = {};

    const DeliveryReport& event = events_list[i];

    if (event.is_error) {
      // If it is an error we need the first argument to be set
      argv[0] = Nan::Error(event.error_string.c_str());
    } else {
      argv[0] = Nan::Null();
    }
    Local<Object> jsobj(Nan::New<Object>());

    Nan::Set(jsobj, Nan::New("topic").ToLocalChecked(),
      Nan::New(event.topic_name).ToLocalChecked());
    Nan::Set(jsobj, Nan::New("partition").ToLocalChecked(),
      Nan::New<v8::Number>(event.partition));
    Nan::Set(jsobj, Nan::New("offset").ToLocalChecked(),
      Nan::New<v8::Number>(event.offset));

    if (event.key) {
      // Nan::NewBuffer takes ownership of the malloc'd key pointer and
      // frees it when the JS buffer is collected (same ownership pattern
      // as the payload below).
      Nan::MaybeLocal<v8::Object> buff = Nan::NewBuffer(
        static_cast<char*>(event.key),
        static_cast<int>(event.key_len));

      Nan::Set(jsobj, Nan::New("key").ToLocalChecked(),
        buff.ToLocalChecked());
    } else {
      Nan::Set(jsobj, Nan::New("key").ToLocalChecked(), Nan::Null());
    }

    if (event.opaque) {
      // The message opaque is a persistent handle created by the producer;
      // hand the wrapped value back to JS, then release the persistent.
      Nan::Persistent<v8::Value> * persistent =
        static_cast<Nan::Persistent<v8::Value> *>(event.opaque);
      v8::Local<v8::Value> object = Nan::New(*persistent);
      Nan::Set(jsobj, Nan::New("opaque").ToLocalChecked(), object);

      // Okay... now reset and destroy the persistent handle
      persistent->Reset();

      // Get rid of the persistent since we are making it local
      delete persistent;
    }

    // -1 is the sentinel for "no timestamp available" (see DeliveryReport).
    if (event.timestamp > -1) {
      Nan::Set(jsobj, Nan::New("timestamp").ToLocalChecked(),
        Nan::New<v8::Number>(event.timestamp));
    }

    if (event.m_include_payload) {
      if (event.payload) {
        // The buffer takes ownership of the malloc'd payload copy.
        Nan::MaybeLocal<v8::Object> buff = Nan::NewBuffer(
          static_cast<char*>(event.payload),
          static_cast<int>(event.len));

        Nan::Set(jsobj, Nan::New<v8::String>("value").ToLocalChecked(),
          buff.ToLocalChecked());
      } else {
        Nan::Set(jsobj, Nan::New<v8::String>("value").ToLocalChecked(),
          Nan::Null());
      }
    }

    Nan::Set(jsobj, Nan::New<v8::String>("size").ToLocalChecked(),
      Nan::New<v8::Number>(event.len));

    argv[1] = jsobj;

    Dispatch(argc, argv);
  }
  // More reports remain queued than we flushed: schedule another pass.
  if (outstanding_event_count > events_list.size()) {
    Execute();
  }
}
|
||||||
|
|
||||||
|
// This only exists to circumvent the problem with not being able to execute JS
// on any thread other than the main thread.

// I still think there may be better alternatives, because there is a lot of
// duplication here
//
// Deep-copies everything needed from an RdKafka::Message so the report can
// be queued and later delivered to JS on the main loop, after the message
// itself has been released by librdkafka.
DeliveryReport::DeliveryReport(RdKafka::Message &message, bool include_payload) :  // NOLINT
  m_include_payload(include_payload) {
  if (message.err() == RdKafka::ERR_NO_ERROR) {
    is_error = false;
  } else {
    is_error = true;
    error_code = message.err();
    error_string = message.errstr();
  }

  topic_name = message.topic_name();
  partition = message.partition();
  offset = message.offset();

  // -1 acts as the "no timestamp" sentinel consumed by the dispatcher.
  if (message.timestamp().type !=
    RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
    timestamp = message.timestamp().timestamp;
  } else {
    timestamp = -1;
  }


  // Key length.
  key_len = message.key_len();

  // It is okay if this is null
  // (copy the key; the Nan::NewBuffer in the dispatcher later takes
  // ownership of this malloc'd pointer — see payload note below)
  if (message.key_pointer()) {
    key = malloc(message.key_len());
    memcpy(key, message.key_pointer(), message.key_len());
  } else {
    key = NULL;
  }

  if (message.msg_opaque()) {
    opaque = message.msg_opaque();
  } else {
    opaque = NULL;
  }

  len = message.len();

  if (m_include_payload && message.payload()) {
    // this pointer will be owned and freed by the Nan::NewBuffer
    // created in DeliveryReportDispatcher::Flush()
    payload = malloc(len);
    memcpy(payload, message.payload(), len);
  } else {
    payload = NULL;
  }
}

DeliveryReport::~DeliveryReport() {}
|
||||||
|
|
||||||
|
// Delivery Report
|
||||||
|
|
||||||
|
Delivery::Delivery():
|
||||||
|
dispatcher() {
|
||||||
|
m_dr_msg_cb = false;
|
||||||
|
}
|
||||||
|
Delivery::~Delivery() {}
|
||||||
|
|
||||||
|
|
||||||
|
void Delivery::SendMessageBuffer(bool send_dr_msg) {
|
||||||
|
m_dr_msg_cb = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// librdkafka delivery-report callback; runs on a background thread.
// Copies the message into a DeliveryReport and wakes the v8 thread.
void Delivery::dr_cb(RdKafka::Message &message) {
  // No JS listeners registered: skip the copy entirely.
  if (!dispatcher.HasCallbacks()) {
    return;
  }

  DeliveryReport msg(message, m_dr_msg_cb);
  // Add() presumably returns the post-add queue length, so the async
  // handle is only signalled on the empty->non-empty transition; one
  // Flush() drains everything queued since — TODO confirm in Add().
  if (dispatcher.Add(msg) == 1) {
    dispatcher.Execute();
  }
}
|
||||||
|
// Rebalance CB
|
||||||
|
|
||||||
|
// Dispatcher that marshals consumer rebalance events to JS callbacks.
RebalanceDispatcher::RebalanceDispatcher() {}
RebalanceDispatcher::~RebalanceDispatcher() {}
|
||||||
|
// Queue a rebalance event; called from librdkafka's callback thread.
void RebalanceDispatcher::Add(const rebalance_event_t &e) {
  scoped_mutex_lock lock(async_lock);  // m_events is shared with Flush()
  m_events.push_back(e);
}
|
||||||
|
void RebalanceDispatcher::Flush() {
|
||||||
|
Nan::HandleScope scope;
|
||||||
|
// Iterate through each of the currently stored events
|
||||||
|
// generate a callback object for each, setting to the members
|
||||||
|
// then
|
||||||
|
|
||||||
|
if (m_events.size() < 1) return;
|
||||||
|
|
||||||
|
const unsigned int argc = 2;
|
||||||
|
|
||||||
|
std::vector<rebalance_event_t> events;
|
||||||
|
{
|
||||||
|
scoped_mutex_lock lock(async_lock);
|
||||||
|
m_events.swap(events);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (size_t i=0; i < events.size(); i++) {
|
||||||
|
v8::Local<v8::Value> argv[argc] = {};
|
||||||
|
|
||||||
|
if (events[i].err == RdKafka::ERR_NO_ERROR) {
|
||||||
|
argv[0] = Nan::Undefined();
|
||||||
|
} else {
|
||||||
|
// ERR__ASSIGN_PARTITIONS? Special case? Nah
|
||||||
|
argv[0] = Nan::New(events[i].err);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<event_topic_partition_t> parts = events[i].partitions;
|
||||||
|
|
||||||
|
// Now convert the TopicPartition list to a JS array
|
||||||
|
argv[1] = TopicPartitionListToV8Array(events[i].partitions);
|
||||||
|
|
||||||
|
Dispatch(argc, argv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// librdkafka rebalance callback: deep-copy the partition list (the
// pointers are only valid for the duration of this call) and wake the
// v8 thread. NOTE(review): `consumer` is unused — presumably the
// assign/unassign decision is made by the JS callback; confirm.
void Rebalance::rebalance_cb(RdKafka::KafkaConsumer *consumer,
  RdKafka::ErrorCode err, std::vector<RdKafka::TopicPartition*> &partitions) {
  dispatcher.Add(rebalance_event_t(err, partitions));
  dispatcher.Execute();
}
|
||||||
|
// Offset Commit CB
|
||||||
|
|
||||||
|
// Dispatcher that marshals offset-commit results to JS callbacks.
OffsetCommitDispatcher::OffsetCommitDispatcher() {}
OffsetCommitDispatcher::~OffsetCommitDispatcher() {}
|
||||||
|
// Queue an offset-commit event; called from librdkafka's callback thread.
void OffsetCommitDispatcher::Add(const offset_commit_event_t &e) {
  scoped_mutex_lock lock(async_lock);  // m_events is shared with Flush()
  m_events.push_back(e);
}
|
||||||
|
void OffsetCommitDispatcher::Flush() {
|
||||||
|
Nan::HandleScope scope;
|
||||||
|
// Iterate through each of the currently stored events
|
||||||
|
// generate a callback object for each, setting to the members
|
||||||
|
// then
|
||||||
|
|
||||||
|
if (m_events.size() < 1) return;
|
||||||
|
|
||||||
|
const unsigned int argc = 2;
|
||||||
|
|
||||||
|
std::vector<offset_commit_event_t> events;
|
||||||
|
{
|
||||||
|
scoped_mutex_lock lock(async_lock);
|
||||||
|
m_events.swap(events);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (size_t i = 0; i < events.size(); i++) {
|
||||||
|
v8::Local<v8::Value> argv[argc] = {};
|
||||||
|
|
||||||
|
if (events[i].err == RdKafka::ERR_NO_ERROR) {
|
||||||
|
argv[0] = Nan::Undefined();
|
||||||
|
} else {
|
||||||
|
argv[0] = Nan::New(events[i].err);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now convert the TopicPartition list to a JS array
|
||||||
|
argv[1] = TopicPartitionListToV8Array(events[i].partitions);
|
||||||
|
|
||||||
|
Dispatch(argc, argv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// librdkafka offset-commit callback: deep-copy the offsets (pointers
// are only valid during this call) and wake the v8 thread.
void OffsetCommit::offset_commit_cb(RdKafka::ErrorCode err,
  std::vector<RdKafka::TopicPartition*> &offsets) {
  dispatcher.Add(offset_commit_event_t(err, offsets));
  dispatcher.Execute();
}
|
||||||
|
// Partitioner callback
|
||||||
|
|
||||||
|
// Partitioner callback wrapper; JS callback is installed via SetCallback.
Partitioner::Partitioner() {}
Partitioner::~Partitioner() {}
|
||||||
|
/**
 * librdkafka partitioner callback: delegates partition selection to the
 * registered JS callback, falling back to random assignment when no
 * callback is set.
 *
 * Returns the chosen partition, or PARTITION_UA when the JS callback
 * returns a non-integer or picks an unavailable partition.
 *
 * NOTE(review): this runs synchronously on librdkafka's thread; calling
 * into v8 here assumes the partitioner is only invoked where that is
 * safe — confirm against the producer setup.
 */
int32_t Partitioner::partitioner_cb(const RdKafka::Topic *topic,
                                    const std::string *key,
                                    int32_t partition_cnt,
                                    void *msg_opaque) {
  // Send this and get the callback and parse the int
  if (callback.IsEmpty()) {
    // default behavior
    return random(topic, partition_cnt);
  }

  Local<Value> argv[3] = {};

  argv[0] = Nan::New<v8::String>(topic->name().c_str()).ToLocalChecked();
  // NOTE(review): key is dereferenced unconditionally — assumes
  // librdkafka never passes NULL here; confirm.
  if (key->empty()) {
    argv[1] = Nan::Null();
  } else {
    argv[1] = Nan::New<v8::String>(key->c_str()).ToLocalChecked();
  }

  argv[2] = Nan::New<v8::Int32>(partition_cnt);

  v8::Local<v8::Value> return_value = callback.Call(3, argv);

  // Non-numeric return values map to "unassigned".
  Nan::Maybe<int32_t> partition_return = Nan::To<int32_t>(return_value);

  int32_t chosen_partition;

  if (partition_return.IsNothing()) {
    chosen_partition = RdKafka::Topic::PARTITION_UA;
  } else {
    chosen_partition = partition_return.FromJust();
  }

  if (!topic->partition_available(chosen_partition)) {
    return RdKafka::Topic::PARTITION_UA;
  }

  return chosen_partition;
}
|
|
||||||
|
/**
 * DJB2 string hash: h = h * 33 + c over each byte, seeded with 5381.
 */
unsigned int Partitioner::djb_hash(const char *str, size_t len) {
  unsigned int h = 5381;
  size_t i = 0;
  while (i < len) {
    h = (h << 5) + h + str[i];
    i++;
  }
  return h;
}
|
||||||
|
/**
 * Pick a pseudo-random partition in [0, max); returns PARTITION_UA when
 * the chosen partition is not currently available.
 */
unsigned int Partitioner::random(const RdKafka::Topic *topic, int32_t max) {
  int32_t candidate = rand() % max;  // NOLINT

  return topic->partition_available(candidate)
    ? candidate
    : RdKafka::Topic::PARTITION_UA;
}
|
||||||
|
// Install the JS partitioner function.
// NOTE(review): relies on Nan::Callback's function-call operator taking
// a v8::Local<v8::Function> to (re)set the stored callback — verify
// against the nan version in use.
void Partitioner::SetCallback(v8::Local<v8::Function> cb) {
  callback(cb);
}
|
||||||
|
|
||||||
|
} // end namespace Callbacks
|
||||||
|
|
||||||
|
} // End namespace NodeKafka
|
265
src/callbacks.h
Normal file
265
src/callbacks.h
Normal file
@ -0,0 +1,265 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_CALLBACKS_H_
|
||||||
|
#define SRC_CALLBACKS_H_
|
||||||
|
|
||||||
|
#include <uv.h>
|
||||||
|
#include <nan.h>
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
#include <deque>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
#include "src/common.h"
|
||||||
|
|
||||||
|
typedef Nan::Persistent<v8::Function,
|
||||||
|
Nan::CopyablePersistentTraits<v8::Function> > PersistentCopyableFunction;
|
||||||
|
typedef std::vector<PersistentCopyableFunction> CopyableFunctionList;
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
class KafkaConsumer;
|
||||||
|
|
||||||
|
namespace Callbacks {
|
||||||
|
|
||||||
|
/**
 * Base class for marshalling librdkafka background-thread events onto
 * the v8 main thread via a uv_async handle. Subclasses queue events
 * (guarded by async_lock) and implement Flush() to convert them to v8
 * values and invoke the registered JS callbacks.
 */
class Dispatcher {
 public:
  Dispatcher();
  ~Dispatcher();
  // Invoke every registered callback with the given argc/argv.
  void Dispatch(const int, v8::Local<v8::Value> []);
  void AddCallback(const v8::Local<v8::Function>&);
  void RemoveCallback(const v8::Local<v8::Function>&);
  bool HasCallbacks();
  // Drain queued events and call the JS callbacks; main thread only.
  virtual void Flush() = 0;
  // Presumably signals the async handle so Flush() runs on the main
  // thread — implementation not shown here; confirm.
  void Execute();
  void Activate();
  void Deactivate();

 protected:
  std::vector<v8::Persistent<v8::Function, v8::CopyablePersistentTraits<v8::Function> > > callbacks;  // NOLINT

  // Guards the subclasses' event queues.
  uv_mutex_t async_lock;

 private:
  // uv_async callback: runs on the main thread and drains the queue.
  NAN_INLINE static NAUV_WORK_CB(AsyncMessage_) {
    Dispatcher *dispatcher =
            static_cast<Dispatcher*>(async->data);
    dispatcher->Flush();
  }

  uv_async_t *async;
};
||||||
|
|
||||||
|
// Plain-data snapshot of an RdKafka::Event, copied so it can outlive
// the librdkafka callback that produced it.
struct event_t {
  RdKafka::Event::Type type;
  std::string message;

  RdKafka::Event::Severity severity;
  std::string fac;  // log facility

  std::string broker_name;
  int throttle_time;
  int broker_id;

  explicit event_t(const RdKafka::Event &);
  ~event_t();
};

// Queues event_t copies for delivery to JS on the main thread.
class EventDispatcher : public Dispatcher {
 public:
  EventDispatcher();
  ~EventDispatcher();
  void Add(const event_t &);
  void Flush();
 protected:
  std::vector<event_t> events;
};

// RdKafka::EventCb implementation; forwards events to its dispatcher.
class Event : public RdKafka::EventCb {
 public:
  Event();
  ~Event();
  void event_cb(RdKafka::Event&);
  EventDispatcher dispatcher;
};
|
||||||
|
|
||||||
|
/**
 * Delivery report class
 *
 * Class exists because the callback needs to be able to give information
 * to the v8 main thread that it can use to formulate its object.
 */
class DeliveryReport {
 public:
  DeliveryReport(RdKafka::Message &, bool);
  ~DeliveryReport();

  // Whether we include the payload. Is the second parameter to the constructor
  bool m_include_payload;

  // If it is an error these will be set
  bool is_error;
  std::string error_string;
  RdKafka::ErrorCode error_code;

  // If it is not
  std::string topic_name;
  int32_t partition;
  int64_t offset;
  int64_t timestamp;  // -1 when the message carried no timestamp

  // Opaque token used. Local value
  void* opaque;

  // Key. It is a pointer to avoid corrupted values
  // https://github.com/Blizzard/node-rdkafka/issues/208
  void* key;  // malloc'd copy, NULL when the message had no key
  size_t key_len;

  size_t len;
  void* payload;  // malloc'd copy; NULL unless m_include_payload
};
|
||||||
|
|
||||||
|
// Queues DeliveryReport copies for delivery to JS on the main thread.
class DeliveryReportDispatcher : public Dispatcher {
 public:
  DeliveryReportDispatcher();
  ~DeliveryReportDispatcher();
  void Flush();
  // Presumably returns the queue length after adding — Delivery::dr_cb
  // compares the result to 1 to decide when to signal; confirm.
  size_t Add(const DeliveryReport &);
 protected:
  std::deque<DeliveryReport> events;
};

// RdKafka::DeliveryReportCb implementation.
class Delivery : public RdKafka::DeliveryReportCb {
 public:
  Delivery();
  ~Delivery();
  void dr_cb(RdKafka::Message&);
  DeliveryReportDispatcher dispatcher;
  // Toggle copying message payloads into delivery reports.
  void SendMessageBuffer(bool dr_copy_payload);
 protected:
  bool m_dr_msg_cb;
};
|
||||||
|
|
||||||
|
// Rebalance dispatcher
|
||||||
|
|
||||||
|
// Value-type copy of one RdKafka::TopicPartition (topic/partition/
// offset), safe to keep after the librdkafka-owned originals are freed.
struct event_topic_partition_t {
  std::string topic;
  int partition;
  int64_t offset;

  event_topic_partition_t(std::string p_topic, int p_partition, int64_t p_offset):  // NOLINT
    topic(p_topic),
    partition(p_partition),
    offset(p_offset) {}
};

// Rebalance notification: error code plus a deep copy of the affected
// partition list.
struct rebalance_event_t {
  RdKafka::ErrorCode err;
  std::vector<event_topic_partition_t> partitions;

  rebalance_event_t(RdKafka::ErrorCode p_err,
    std::vector<RdKafka::TopicPartition*> p_partitions):
    err(p_err) {
    // Iterate over the topic partitions because we won't have them later
    for (size_t topic_partition_i = 0;
      topic_partition_i < p_partitions.size(); topic_partition_i++) {
      RdKafka::TopicPartition* topic_partition =
        p_partitions[topic_partition_i];

      event_topic_partition_t tp(
        topic_partition->topic(),
        topic_partition->partition(),
        topic_partition->offset());

      partitions.push_back(tp);
    }
  }
};

// Offset-commit notification: same shape and copying strategy as
// rebalance_event_t.
struct offset_commit_event_t {
  RdKafka::ErrorCode err;
  std::vector<event_topic_partition_t> partitions;

  offset_commit_event_t(RdKafka::ErrorCode p_err,
    const std::vector<RdKafka::TopicPartition*> &p_partitions):
    err(p_err) {
    // Iterate over the topic partitions because we won't have them later
    for (size_t topic_partition_i = 0;
      topic_partition_i < p_partitions.size(); topic_partition_i++) {
      RdKafka::TopicPartition* topic_partition =
        p_partitions[topic_partition_i];

      // Just reuse this thing because it's the same exact thing we need
      event_topic_partition_t tp(
        topic_partition->topic(),
        topic_partition->partition(),
        topic_partition->offset());

      partitions.push_back(tp);
    }
  }
};
|
||||||
|
|
||||||
|
// Queues rebalance events for delivery to JS on the main thread.
class RebalanceDispatcher : public Dispatcher {
 public:
  RebalanceDispatcher();
  ~RebalanceDispatcher();
  void Add(const rebalance_event_t &);
  void Flush();
 protected:
  std::vector<rebalance_event_t> m_events;
};

// RdKafka::RebalanceCb implementation backed by a RebalanceDispatcher.
class Rebalance : public RdKafka::RebalanceCb {
 public:
  void rebalance_cb(RdKafka::KafkaConsumer *, RdKafka::ErrorCode,
    std::vector<RdKafka::TopicPartition*> &);

  RebalanceDispatcher dispatcher;
 private:
  v8::Persistent<v8::Function> m_cb;
};
|
||||||
|
|
||||||
|
// Queues offset-commit events for delivery to JS on the main thread.
class OffsetCommitDispatcher : public Dispatcher {
 public:
  OffsetCommitDispatcher();
  ~OffsetCommitDispatcher();
  void Add(const offset_commit_event_t &);
  void Flush();
 protected:
  std::vector<offset_commit_event_t> m_events;
};

// RdKafka::OffsetCommitCb implementation backed by an
// OffsetCommitDispatcher.
class OffsetCommit : public RdKafka::OffsetCommitCb {
 public:
  void offset_commit_cb(RdKafka::ErrorCode, std::vector<RdKafka::TopicPartition*> &);  // NOLINT

  OffsetCommitDispatcher dispatcher;
 private:
  v8::Persistent<v8::Function> m_cb;
};
|
||||||
|
|
||||||
|
// RdKafka::PartitionerCb implementation that defers to a JS function
// when one is set, otherwise picks a random available partition.
class Partitioner : public RdKafka::PartitionerCb {
 public:
  Partitioner();
  ~Partitioner();
  int32_t partitioner_cb( const RdKafka::Topic*, const std::string*, int32_t, void*);  // NOLINT
  Nan::Callback callback;  // NOLINT
  void SetCallback(v8::Local<v8::Function>);
 private:
  // DJB2 string hash; not referenced by partitioner_cb in this file.
  static unsigned int djb_hash(const char*, size_t);
  // Random available partition, or PARTITION_UA.
  static unsigned int random(const RdKafka::Topic*, int32_t);
};
|
||||||
|
|
||||||
|
} // namespace Callbacks
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_CALLBACKS_H_
|
582
src/common.cc
Normal file
582
src/common.cc
Normal file
@ -0,0 +1,582 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * Write a log line to stderr, prefixed with "% ".
 *
 * Fix: streamed the std::string directly instead of the redundant
 * .c_str() round-trip (identical output, no needless conversion).
 */
void Log(std::string str) {
  std::cerr << "% " << str << std::endl;
}
|
||||||
|
|
||||||
|
// Generic helper: read object[field_name] coerced to T via Nan::To,
// falling back to `def` when the property is absent or not convertible.
template<typename T>
T GetParameter(v8::Local<v8::Object> object, std::string field_name, T def) {
  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
  if (Nan::Has(object, field).FromMaybe(false)) {
    Nan::Maybe<T> maybeT = Nan::To<T>(Nan::Get(object, field).ToLocalChecked());
    if (maybeT.IsNothing()) {
      return def;
    } else {
      return maybeT.FromJust();
    }
  }
  return def;
}
|
||||||
|
|
||||||
|
// int64_t specialization: additionally requires the value to already be
// a JS number before converting (so e.g. numeric strings yield `def`).
template<>
int64_t GetParameter<int64_t>(v8::Local<v8::Object> object,
  std::string field_name, int64_t def) {
  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
  if (Nan::Has(object, field).FromMaybe(false)) {
    v8::Local<v8::Value> v = Nan::Get(object, field).ToLocalChecked();

    if (!v->IsNumber()) {
      return def;
    }

    Nan::Maybe<int64_t> maybeInt = Nan::To<int64_t>(v);
    if (maybeInt.IsNothing()) {
      return def;
    } else {
      return maybeInt.FromJust();
    }
  }
  return def;
}
|
||||||
|
|
||||||
|
// bool specialization: only accepts an actual JS boolean; any other
// type (including truthy values) yields `def`.
template<>
bool GetParameter<bool>(v8::Local<v8::Object> object,
  std::string field_name, bool def) {
  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
  if (Nan::Has(object, field).FromMaybe(false)) {
    v8::Local<v8::Value> v = Nan::Get(object, field).ToLocalChecked();

    if (!v->IsBoolean()) {
      return def;
    }

    Nan::Maybe<bool> maybeInt = Nan::To<bool>(v);
    if (maybeInt.IsNothing()) {
      return def;
    } else {
      return maybeInt.FromJust();
    }
  }
  return def;
}
|
||||||
|
|
||||||
|
// int specialization: delegates to the int64_t path; values outside
// int range are truncated by the cast.
template<>
int GetParameter<int>(v8::Local<v8::Object> object,
  std::string field_name, int def) {
  return static_cast<int>(GetParameter<int64_t>(object, field_name, def));
}
|
||||||
|
|
||||||
|
// std::string specialization: stringifies any non-null, non-undefined
// value (UTF-8); absent/null/undefined properties yield `def`.
template<>
std::string GetParameter<std::string>(v8::Local<v8::Object> object,
                                      std::string field_name,
                                      std::string def) {
  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
  if (Nan::Has(object, field).FromMaybe(false)) {
    v8::Local<v8::Value> parameter =
      Nan::Get(object, field).ToLocalChecked();

    if (!parameter->IsUndefined() && !parameter->IsNull()) {
      v8::Local<v8::String> val = Nan::To<v8::String>(parameter)
        .ToLocalChecked();

      if (!val->IsUndefined() && !val->IsNull()) {
        Nan::Utf8String parameterValue(val);
        std::string parameterString(*parameterValue);

        return parameterString;
      }
    }
  }
  return def;
}
|
||||||
|
|
||||||
|
// std::vector<std::string> specialization: only accepts a JS array;
// any other value yields `def`.
template<>
std::vector<std::string> GetParameter<std::vector<std::string> >(
  v8::Local<v8::Object> object, std::string field_name,
  std::vector<std::string> def) {
  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();

  if (Nan::Has(object, field).FromMaybe(false)) {
    v8::Local<v8::Value> maybeArray = Nan::Get(object, field).ToLocalChecked();
    if (maybeArray->IsArray()) {
      v8::Local<v8::Array> parameter = maybeArray.As<v8::Array>();
      return v8ArrayToStringVector(parameter);
    }
  }
  return def;
}
|
||||||
|
|
||||||
|
// Convert a JS array to a vector of UTF-8 strings, silently skipping
// elements that cannot be fetched or stringified.
std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array> parameter) {
  std::vector<std::string> newItem;

  if (parameter->Length() >= 1) {
    for (unsigned int i = 0; i < parameter->Length(); i++) {
      v8::Local<v8::Value> v;
      if (!Nan::Get(parameter, i).ToLocal(&v)) {
        continue;
      }
      Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(v);
      if (p.IsEmpty()) {
        continue;
      }
      Nan::Utf8String pVal(p.ToLocalChecked());
      std::string pString(*pVal);
      newItem.push_back(pString);
    }
  }
  return newItem;
}
|
||||||
|
|
||||||
|
namespace Conversion {
|
||||||
|
namespace Topic {
|
||||||
|
|
||||||
|
// Convert a JS array of topic names to strings. RegExp entries are
// flattened to their source pattern text; other values are stringified.
// Unfetchable/unconvertible entries are skipped.
std::vector<std::string> ToStringVector(v8::Local<v8::Array> parameter) {
  std::vector<std::string> newItem;

  if (parameter->Length() >= 1) {
    for (unsigned int i = 0; i < parameter->Length(); i++) {
      v8::Local<v8::Value> element;
      if (!Nan::Get(parameter, i).ToLocal(&element)) {
        continue;
      }

      if (!element->IsRegExp()) {
        Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(element);

        if (p.IsEmpty()) {
          continue;
        }

        Nan::Utf8String pVal(p.ToLocalChecked());
        std::string pString(*pVal);

        newItem.push_back(pString);
      } else {
        Nan::Utf8String pVal(element.As<v8::RegExp>()->GetSource());
        std::string pString(*pVal);

        // NOTE(review): prints every regex subscription to stderr —
        // looks like leftover debug output; confirm before removing.
        Log(pString);

        newItem.push_back(pString);
      }
    }
  }

  return newItem;
}
|
||||||
|
|
||||||
|
// Convert a vector of topic names to a JS array of strings.
v8::Local<v8::Array> ToV8Array(std::vector<std::string> parameter) {
  v8::Local<v8::Array> newItem = Nan::New<v8::Array>();

  for (size_t i = 0; i < parameter.size(); i++) {
    std::string topic = parameter[i];

    Nan::Set(newItem, i, Nan::New<v8::String>(topic).ToLocalChecked());
  }

  return newItem;
}
|
||||||
|
|
||||||
|
} // namespace Topic
|
||||||
|
|
||||||
|
namespace TopicPartition {
|
||||||
|
|
||||||
|
/**
 * @brief RdKafka::TopicPartition vector to a v8 Array
 *
 * Per-element errors become Error objects at the same index; successful
 * entries become { topic, partition, offset? } objects (offset omitted
 * when it is OFFSET_INVALID).
 *
 * @see v8ArrayToTopicPartitionVector
 */
v8::Local<v8::Array> ToV8Array(
  std::vector<RdKafka::TopicPartition*> & topic_partition_list) {  // NOLINT
  v8::Local<v8::Array> array = Nan::New<v8::Array>();
  for (size_t topic_partition_i = 0;
    topic_partition_i < topic_partition_list.size(); topic_partition_i++) {
    RdKafka::TopicPartition* topic_partition =
      topic_partition_list[topic_partition_i];

    if (topic_partition->err() != RdKafka::ErrorCode::ERR_NO_ERROR) {
      Nan::Set(array, topic_partition_i,
        Nan::Error(Nan::New(RdKafka::err2str(topic_partition->err()))
        .ToLocalChecked()));
    } else {
      // We have the list now let's get the properties from it
      v8::Local<v8::Object> obj = Nan::New<v8::Object>();

      if (topic_partition->offset() != RdKafka::Topic::OFFSET_INVALID) {
        Nan::Set(obj, Nan::New("offset").ToLocalChecked(),
          Nan::New<v8::Number>(topic_partition->offset()));
      }
      Nan::Set(obj, Nan::New("partition").ToLocalChecked(),
        Nan::New<v8::Number>(topic_partition->partition()));
      Nan::Set(obj, Nan::New("topic").ToLocalChecked(),
        Nan::New<v8::String>(topic_partition->topic().c_str())
        .ToLocalChecked());

      Nan::Set(array, topic_partition_i, obj);
    }
  }

  return array;
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief v8 Array of topic partitions to RdKafka::TopicPartition vector
|
||||||
|
*
|
||||||
|
* @see v8ArrayToTopicPartitionVector
|
||||||
|
*
|
||||||
|
* @note You must delete all the pointers inside here when you are done!!
|
||||||
|
*/
|
||||||
|
std::vector<RdKafka::TopicPartition*> FromV8Array(
|
||||||
|
const v8::Local<v8::Array> & topic_partition_list) {
|
||||||
|
// NOTE: ARRAY OF POINTERS! DELETE THEM WHEN YOU ARE FINISHED
|
||||||
|
std::vector<RdKafka::TopicPartition*> array;
|
||||||
|
|
||||||
|
for (size_t topic_partition_i = 0;
|
||||||
|
topic_partition_i < topic_partition_list->Length(); topic_partition_i++) {
|
||||||
|
v8::Local<v8::Value> topic_partition_value;
|
||||||
|
if (!Nan::Get(topic_partition_list, topic_partition_i)
|
||||||
|
.ToLocal(&topic_partition_value)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (topic_partition_value->IsObject()) {
|
||||||
|
array.push_back(FromV8Object(
|
||||||
|
Nan::To<v8::Object>(topic_partition_value).ToLocalChecked()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return array;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * @brief v8::Object to RdKafka::TopicPartition
 *
 * Reads { topic, partition, offset } from the object. Returns NULL when
 * topic is missing/empty or partition is absent (-1 sentinel); offset
 * defaults to 0. Caller owns the returned pointer.
 */
RdKafka::TopicPartition * FromV8Object(v8::Local<v8::Object> topic_partition) {
  std::string topic = GetParameter<std::string>(topic_partition, "topic", "");
  int partition = GetParameter<int>(topic_partition, "partition", -1);
  int64_t offset = GetParameter<int64_t>(topic_partition, "offset", 0);

  if (partition == -1) {
    return NULL;
  }

  if (topic.empty()) {
    return NULL;
  }

  return RdKafka::TopicPartition::create(topic, partition, offset);
}
|
||||||
|
|
||||||
|
} // namespace TopicPartition
|
||||||
|
|
||||||
|
namespace Metadata {
|
||||||
|
|
||||||
|
/**
 * @brief RdKafka::Metadata to v8::Object
 *
 * Produces { orig_broker_id, orig_broker_name, brokers: [{id, host,
 * port}], topics: [{name, partitions: [{id, leader, replicas, isrs}]}] }.
 */
v8::Local<v8::Object> ToV8Object(RdKafka::Metadata* metadata) {
  v8::Local<v8::Object> obj = Nan::New<v8::Object>();

  v8::Local<v8::Array> broker_data = Nan::New<v8::Array>();
  v8::Local<v8::Array> topic_data = Nan::New<v8::Array>();

  const BrokerMetadataList* brokers = metadata->brokers();  // NOLINT

  unsigned int broker_i = 0;

  for (BrokerMetadataList::const_iterator it = brokers->begin();
    it != brokers->end(); ++it, broker_i++) {
    // Start iterating over brokers and set the object up

    const RdKafka::BrokerMetadata* x = *it;

    v8::Local<v8::Object> current_broker = Nan::New<v8::Object>();

    Nan::Set(current_broker, Nan::New("id").ToLocalChecked(),
      Nan::New<v8::Number>(x->id()));
    Nan::Set(current_broker, Nan::New("host").ToLocalChecked(),
      Nan::New<v8::String>(x->host().c_str()).ToLocalChecked());
    Nan::Set(current_broker, Nan::New("port").ToLocalChecked(),
      Nan::New<v8::Number>(x->port()));

    Nan::Set(broker_data, broker_i, current_broker);
  }

  unsigned int topic_i = 0;

  const TopicMetadataList* topics = metadata->topics();

  for (TopicMetadataList::const_iterator it = topics->begin();
    it != topics->end(); ++it, topic_i++) {
    // Start iterating over topics

    const RdKafka::TopicMetadata* x = *it;

    v8::Local<v8::Object> current_topic = Nan::New<v8::Object>();

    Nan::Set(current_topic, Nan::New("name").ToLocalChecked(),
      Nan::New<v8::String>(x->topic().c_str()).ToLocalChecked());

    v8::Local<v8::Array> current_topic_partitions = Nan::New<v8::Array>();

    const PartitionMetadataList* current_partition_data = x->partitions();

    unsigned int partition_i = 0;
    PartitionMetadataList::const_iterator itt;

    for (itt = current_partition_data->begin();
      itt != current_partition_data->end(); ++itt, partition_i++) {
      // partition iterate
      const RdKafka::PartitionMetadata* xx = *itt;

      v8::Local<v8::Object> current_partition = Nan::New<v8::Object>();

      Nan::Set(current_partition, Nan::New("id").ToLocalChecked(),
        Nan::New<v8::Number>(xx->id()));
      Nan::Set(current_partition, Nan::New("leader").ToLocalChecked(),
        Nan::New<v8::Number>(xx->leader()));

      const std::vector<int32_t> * replicas = xx->replicas();
      const std::vector<int32_t> * isrs = xx->isrs();

      std::vector<int32_t>::const_iterator r_it;
      std::vector<int32_t>::const_iterator i_it;

      unsigned int r_i = 0;
      unsigned int i_i = 0;

      v8::Local<v8::Array> current_replicas = Nan::New<v8::Array>();

      for (r_it = replicas->begin(); r_it != replicas->end(); ++r_it, r_i++) {
        Nan::Set(current_replicas, r_i, Nan::New<v8::Int32>(*r_it));
      }

      v8::Local<v8::Array> current_isrs = Nan::New<v8::Array>();

      for (i_it = isrs->begin(); i_it != isrs->end(); ++i_it, i_i++) {
        Nan::Set(current_isrs, i_i, Nan::New<v8::Int32>(*i_it));
      }

      Nan::Set(current_partition, Nan::New("replicas").ToLocalChecked(),
        current_replicas);
      Nan::Set(current_partition, Nan::New("isrs").ToLocalChecked(),
        current_isrs);

      Nan::Set(current_topic_partitions, partition_i, current_partition);
    }  // iterate over partitions

    Nan::Set(current_topic, Nan::New("partitions").ToLocalChecked(),
      current_topic_partitions);

    Nan::Set(topic_data, topic_i, current_topic);
  }  // End iterating over topics

  Nan::Set(obj, Nan::New("orig_broker_id").ToLocalChecked(),
    Nan::New<v8::Number>(metadata->orig_broker_id()));

  Nan::Set(obj, Nan::New("orig_broker_name").ToLocalChecked(),
    Nan::New<v8::String>(metadata->orig_broker_name()).ToLocalChecked());

  Nan::Set(obj, Nan::New("topics").ToLocalChecked(), topic_data);
  Nan::Set(obj, Nan::New("brokers").ToLocalChecked(), broker_data);

  return obj;
}
|
||||||
|
|
||||||
|
} // namespace Metadata
|
||||||
|
|
||||||
|
namespace Message {
|
||||||
|
|
||||||
|
// Overload for all use cases except delivery reports: always include
// both the payload and the headers.
v8::Local<v8::Object> ToV8Object(RdKafka::Message *message) {
  return ToV8Object(message, true, true);
}
|
||||||
|
|
||||||
|
/**
 * Convert an RdKafka::Message into a plain v8 object.
 *
 * On a message-level error, an error object is returned instead of a
 * message object. Otherwise the result carries value/size/key/topic/
 * offset/partition/timestamp, and optionally the headers.
 *
 * @param message          the consumed (or delivered) message
 * @param include_payload  when false, "value" is set to undefined
 * @param include_headers  when false, message headers are omitted
 */
v8::Local<v8::Object> ToV8Object(RdKafka::Message *message,
                                 bool include_payload,
                                 bool include_headers) {
  // Errored messages become an error object rather than a message object.
  if (message->err() != RdKafka::ERR_NO_ERROR) {
    return RdKafkaError(message->err());
  }

  v8::Local<v8::Object> pack = Nan::New<v8::Object>();

  const void* message_payload = message->payload();

  // "value": undefined when suppressed, a Buffer when present, null when
  // the broker delivered a null payload (e.g. a tombstone).
  v8::Local<v8::Value> value;
  if (!include_payload) {
    value = Nan::Undefined();
  } else if (message_payload) {
    value = Nan::Encode(message_payload, message->len(), Nan::Encoding::BUFFER);
  } else {
    value = Nan::Null();
  }
  Nan::Set(pack, Nan::New<v8::String>("value").ToLocalChecked(), value);

  // Headers are attached only when present on the message AND requested.
  RdKafka::Headers* headers = message->headers();
  if (headers != 0 && include_headers) {
    v8::Local<v8::Array> v8headers = Nan::New<v8::Array>();
    std::vector<RdKafka::Headers::Header> all = headers->get_all();
    int index = 0;
    for (std::vector<RdKafka::Headers::Header>::iterator it = all.begin();
         it != all.end(); ++it, ++index) {
      // Each header becomes { key: <Buffer value> }.
      v8::Local<v8::Object> v8header = Nan::New<v8::Object>();
      Nan::Set(v8header, Nan::New<v8::String>(it->key()).ToLocalChecked(),
        Nan::Encode(it->value_string(), it->value_size(),
          Nan::Encoding::BUFFER));
      Nan::Set(v8headers, index, v8header);
    }
    Nan::Set(pack,
      Nan::New<v8::String>("headers").ToLocalChecked(), v8headers);
  }

  Nan::Set(pack, Nan::New<v8::String>("size").ToLocalChecked(),
           Nan::New<v8::Number>(message->len()));

  // The key is exposed as a Buffer to avoid corruption of binary keys.
  // https://github.com/Blizzard/node-rdkafka/issues/208
  const void* key_payload = message->key_pointer();
  if (key_payload) {
    Nan::Set(pack, Nan::New<v8::String>("key").ToLocalChecked(),
      Nan::Encode(key_payload, message->key_len(), Nan::Encoding::BUFFER));
  } else {
    Nan::Set(pack, Nan::New<v8::String>("key").ToLocalChecked(),
      Nan::Null());
  }

  Nan::Set(pack, Nan::New<v8::String>("topic").ToLocalChecked(),
           Nan::New<v8::String>(message->topic_name()).ToLocalChecked());
  Nan::Set(pack, Nan::New<v8::String>("offset").ToLocalChecked(),
           Nan::New<v8::Number>(message->offset()));
  Nan::Set(pack, Nan::New<v8::String>("partition").ToLocalChecked(),
           Nan::New<v8::Number>(message->partition()));
  Nan::Set(pack, Nan::New<v8::String>("timestamp").ToLocalChecked(),
           Nan::New<v8::Number>(message->timestamp().timestamp));

  return pack;
}
|
||||||
|
|
||||||
|
} // namespace Message
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @section Admin API models
|
||||||
|
*/
|
||||||
|
|
||||||
|
namespace Admin {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a low level rdkafka handle to represent a topic
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
rd_kafka_NewTopic_t* FromV8TopicObject(
|
||||||
|
v8::Local<v8::Object> object, std::string &errstr) { // NOLINT
|
||||||
|
std::string topic_name = GetParameter<std::string>(object, "topic", "");
|
||||||
|
int num_partitions = GetParameter<int>(object, "num_partitions", 0);
|
||||||
|
int replication_factor = GetParameter<int>(object, "replication_factor", 0);
|
||||||
|
|
||||||
|
// Too slow to allocate this every call but admin api
|
||||||
|
// shouldn't be called that often
|
||||||
|
char* errbuf = reinterpret_cast<char*>(malloc(100));
|
||||||
|
size_t errstr_size = 100;
|
||||||
|
|
||||||
|
rd_kafka_NewTopic_t* new_topic = rd_kafka_NewTopic_new(
|
||||||
|
topic_name.c_str(),
|
||||||
|
num_partitions,
|
||||||
|
replication_factor,
|
||||||
|
errbuf,
|
||||||
|
errstr_size);
|
||||||
|
|
||||||
|
if (new_topic == NULL) {
|
||||||
|
errstr = std::string(errbuf, errstr_size);
|
||||||
|
free(errbuf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
rd_kafka_resp_err_t err;
|
||||||
|
|
||||||
|
if (Nan::Has(object, Nan::New("config").ToLocalChecked()).FromMaybe(false)) {
|
||||||
|
// Get the config v8::Object that we can get parameters on
|
||||||
|
v8::Local<v8::Object> config =
|
||||||
|
Nan::Get(object, Nan::New("config").ToLocalChecked())
|
||||||
|
.ToLocalChecked().As<v8::Object>();
|
||||||
|
|
||||||
|
// Get all of the keys of the object
|
||||||
|
v8::MaybeLocal<v8::Array> config_keys = Nan::GetOwnPropertyNames(config);
|
||||||
|
|
||||||
|
if (!config_keys.IsEmpty()) {
|
||||||
|
v8::Local<v8::Array> field_array = config_keys.ToLocalChecked();
|
||||||
|
for (size_t i = 0; i < field_array->Length(); i++) {
|
||||||
|
v8::Local<v8::String> config_key = Nan::Get(field_array, i)
|
||||||
|
.ToLocalChecked().As<v8::String>();
|
||||||
|
v8::Local<v8::Value> config_value = Nan::Get(config, config_key)
|
||||||
|
.ToLocalChecked();
|
||||||
|
|
||||||
|
// If the config value is a string...
|
||||||
|
if (config_value->IsString()) {
|
||||||
|
Nan::Utf8String pKeyVal(config_key);
|
||||||
|
std::string pKeyString(*pKeyVal);
|
||||||
|
|
||||||
|
Nan::Utf8String pValueVal(config_value.As<v8::String>());
|
||||||
|
std::string pValString(*pValueVal);
|
||||||
|
|
||||||
|
err = rd_kafka_NewTopic_set_config(
|
||||||
|
new_topic, pKeyString.c_str(), pValString.c_str());
|
||||||
|
|
||||||
|
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
|
||||||
|
errstr = rd_kafka_err2str(err);
|
||||||
|
rd_kafka_NewTopic_destroy(new_topic);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
errstr = "Config values must all be provided as strings.";
|
||||||
|
rd_kafka_NewTopic_destroy(new_topic);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free it again cuz we malloc'd it.
|
||||||
|
// free(errbuf);
|
||||||
|
return new_topic;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batch conversion is not implemented yet; callers convert topic objects
// one at a time via FromV8TopicObject.
rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local<v8::Array>) {
  return NULL;
}
|
||||||
|
|
||||||
|
} // namespace Admin
|
||||||
|
|
||||||
|
} // namespace Conversion
|
||||||
|
|
||||||
|
namespace Util {
|
||||||
|
/**
 * Copy a v8 string into an owned std::string (UTF-8 encoded).
 */
std::string FromV8String(v8::Local<v8::String> val) {
  Nan::Utf8String utf8(val);
  return std::string(*utf8);
}
|
||||||
|
} // Namespace Util
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
134
src/common.h
Normal file
134
src/common.h
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
/*
 * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
 *
 * Copyright (c) 2016 Blizzard Entertainment
 *
 * This software may be modified and distributed under the terms
 * of the MIT license. See the LICENSE.txt file for details.
 */

#ifndef SRC_COMMON_H_
#define SRC_COMMON_H_

#include <nan.h>

#include <iostream>
#include <string>
#include <vector>

#include "rdkafkacpp.h"
#include "rdkafka.h"  // NOLINT

#include "src/errors.h"

// Shorthand for the metadata collections returned by librdkafka.
typedef std::vector<const RdKafka::BrokerMetadata*> BrokerMetadataList;
typedef std::vector<const RdKafka::PartitionMetadata*> PartitionMetadataList;
typedef std::vector<const RdKafka::TopicMetadata *> TopicMetadataList;

namespace NodeKafka {

void Log(std::string);

// Read a named property from a v8 object, falling back to the supplied
// default when the property is missing or of the wrong type.
template<typename T> T GetParameter(v8::Local<v8::Object>, std::string, T);
template<> std::string GetParameter<std::string>(
  v8::Local<v8::Object>, std::string, std::string);
template<> std::vector<std::string> GetParameter<std::vector<std::string> >(
  v8::Local<v8::Object>, std::string, std::vector<std::string>);
// template int GetParameter<int>(v8::Local<v8::Object, std::string, int);
std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array>);

// RAII guard: holds a uv mutex for the lifetime of the object.
class scoped_mutex_lock {
 public:
  explicit scoped_mutex_lock(uv_mutex_t& lock_) :  // NOLINT
    async_lock(lock_) {
    uv_mutex_lock(&async_lock);
  }

  ~scoped_mutex_lock() {
    uv_mutex_unlock(&async_lock);
  }

 private:
  uv_mutex_t &async_lock;
};

/*
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock)

int uv_rwlock_trywrlock(uv_rwlock_t* rwlock)
*/

// RAII guard: exclusive (writer) hold on a uv read/write lock.
class scoped_shared_write_lock {
 public:
  explicit scoped_shared_write_lock(uv_rwlock_t& lock_) :  // NOLINT
    async_lock(lock_) {
    uv_rwlock_wrlock(&async_lock);
  }

  ~scoped_shared_write_lock() {
    uv_rwlock_wrunlock(&async_lock);
  }

 private:
  uv_rwlock_t &async_lock;
};

// RAII guard: shared (reader) hold on a uv read/write lock.
class scoped_shared_read_lock {
 public:
  explicit scoped_shared_read_lock(uv_rwlock_t& lock_) :  // NOLINT
    async_lock(lock_) {
    uv_rwlock_rdlock(&async_lock);
  }

  ~scoped_shared_read_lock() {
    uv_rwlock_rdunlock(&async_lock);
  }

 private:
  uv_rwlock_t &async_lock;
};

// Conversion helpers between v8 values and librdkafka/rdkafkacpp types.
namespace Conversion {

namespace Admin {
  // Topics from topic object, or topic object array
  rd_kafka_NewTopic_t* FromV8TopicObject(
    v8::Local<v8::Object>, std::string &errstr);  // NOLINT
  rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local<v8::Array>);
}

namespace Topic {
  std::vector<std::string> ToStringVector(v8::Local<v8::Array>);
  v8::Local<v8::Array> ToV8Array(std::vector<std::string>);
}  // namespace Topic

namespace TopicPartition {

v8::Local<v8::Array> ToV8Array(std::vector<RdKafka::TopicPartition*> &);
RdKafka::TopicPartition * FromV8Object(v8::Local<v8::Object>);
std::vector<RdKafka::TopicPartition *> FromV8Array(const v8::Local<v8::Array> &);  // NOLINT

}  // namespace TopicPartition

namespace Metadata {

v8::Local<v8::Object> ToV8Object(RdKafka::Metadata*);

}  // namespace Metadata

namespace Message {

v8::Local<v8::Object> ToV8Object(RdKafka::Message*);
v8::Local<v8::Object> ToV8Object(RdKafka::Message*, bool, bool);

}

}  // namespace Conversion

namespace Util {
  std::string FromV8String(v8::Local<v8::String>);
}

}  // namespace NodeKafka

#endif  // SRC_COMMON_H_
|
155
src/config.cc
Normal file
155
src/config.cc
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <list>
|
||||||
|
|
||||||
|
#include "src/config.h"
|
||||||
|
|
||||||
|
using Nan::MaybeLocal;
|
||||||
|
using Nan::Maybe;
|
||||||
|
using v8::Local;
|
||||||
|
using v8::String;
|
||||||
|
using v8::Object;
|
||||||
|
using std::cout;
|
||||||
|
using std::endl;
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * Print a config dump (as produced by RdKafka::Conf::dump()) to stdout as
 * "key = value" lines. The list alternates key, value, key, value, ...
 *
 * @param dump  list of alternating keys and values; not modified
 */
void Conf::DumpConfig(std::list<std::string> *dump) {
  for (std::list<std::string>::iterator it = dump->begin();
      it != dump->end(); ) {
    std::cout << *it << " = ";
    it++;
    // FIX: the old loop advanced the iterator twice per pass without
    // re-checking end(), dereferencing past-the-end on an odd-length list.
    if (it == dump->end()) {
      std::cout << "(missing value)" << std::endl;
      break;
    }
    std::cout << *it << std::endl;
    it++;
  }
  std::cout << std::endl;
}
|
||||||
|
|
||||||
|
/**
 * Build a Conf from a JS options object.
 *
 * Each own property is applied to the underlying RdKafka::Conf: numeric and
 * boolean values are stringified first (on Node > 6), function values are
 * registered as callbacks, and anything else is treated as a string.
 *
 * @param type    global or topic configuration
 * @param object  JS options object
 * @param errstr  out-parameter describing the failure when NULL is returned
 * @returns a new Conf the caller owns, or NULL on error.
 */
Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local<v8::Object> object, std::string &errstr) {  // NOLINT
  v8::Local<v8::Context> context = Nan::GetCurrentContext();
  Conf* rdconf = static_cast<Conf*>(RdKafka::Conf::create(type));

  v8::MaybeLocal<v8::Array> _property_names = object->GetOwnPropertyNames(
    Nan::GetCurrentContext());
  v8::Local<v8::Array> property_names = _property_names.ToLocalChecked();

  for (unsigned int i = 0; i < property_names->Length(); ++i) {
    v8::Local<v8::Value> key = Nan::Get(property_names, i).ToLocalChecked();
    v8::Local<v8::Value> value = Nan::Get(object, key).ToLocalChecked();

    // Non-string keys are silently skipped.
    if (!key->IsString()) {
      continue;
    }
    Nan::Utf8String utf8_key(key);
    std::string string_key(*utf8_key);

    if (value->IsFunction()) {
      // Function values are wired up as callbacks, both for the "add" and
      // "remove" registration passes.
      v8::Local<v8::Function> cb = value.As<v8::Function>();
      rdconf->ConfigureCallback(string_key, cb, true, errstr);
      if (!errstr.empty()) {
        delete rdconf;
        return NULL;
      }
      rdconf->ConfigureCallback(string_key, cb, false, errstr);
      if (!errstr.empty()) {
        delete rdconf;
        return NULL;
      }
      continue;
    }

    // Everything else is converted to the string form librdkafka expects.
    std::string string_value;
#if NODE_MAJOR_VERSION > 6
    if (value->IsInt32()) {
      string_value = std::to_string(
        value->Int32Value(context).ToChecked());
    } else if (value->IsUint32()) {
      string_value = std::to_string(
        value->Uint32Value(context).ToChecked());
    } else if (value->IsBoolean()) {
      const bool v = Nan::To<bool>(value).ToChecked();
      string_value = v ? "true" : "false";
    } else {
      Nan::Utf8String utf8_value(value.As<v8::String>());
      string_value = std::string(*utf8_value);
    }
#else
    Nan::Utf8String utf8_value(value.As<v8::String>());
    string_value = std::string(*utf8_value);
#endif
    if (rdconf->set(string_key, string_value, errstr) != Conf::CONF_OK) {
      delete rdconf;
      return NULL;
    }
  }

  return rdconf;
}
|
||||||
|
|
||||||
|
/**
 * Register or unregister a JS function for one of the supported
 * configuration-level callbacks ("rebalance_cb" or "offset_commit_cb").
 * Unknown keys are ignored.
 *
 * @param string_key  callback configuration key
 * @param cb          JS function to add or remove
 * @param add         true to register, false to unregister
 * @param errstr      out-parameter filled by the underlying set() on failure
 */
void Conf::ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add, std::string &errstr) {
  if (string_key.compare("rebalance_cb") == 0) {
    if (!add) {
      if (this->m_rebalance_cb != NULL) {
        this->m_rebalance_cb->dispatcher.RemoveCallback(cb);
      }
      return;
    }
    // Lazily create the dispatcher wrapper on first registration.
    if (this->m_rebalance_cb == NULL) {
      this->m_rebalance_cb = new NodeKafka::Callbacks::Rebalance();
    }
    this->m_rebalance_cb->dispatcher.AddCallback(cb);
    this->set(string_key, this->m_rebalance_cb, errstr);
    return;
  }

  if (string_key.compare("offset_commit_cb") == 0) {
    if (!add) {
      if (this->m_offset_commit_cb != NULL) {
        this->m_offset_commit_cb->dispatcher.RemoveCallback(cb);
      }
      return;
    }
    if (this->m_offset_commit_cb == NULL) {
      this->m_offset_commit_cb = new NodeKafka::Callbacks::OffsetCommit();
    }
    this->m_offset_commit_cb->dispatcher.AddCallback(cb);
    this->set(string_key, this->m_offset_commit_cb, errstr);
    return;
  }
}
|
||||||
|
|
||||||
|
void Conf::listen() {
|
||||||
|
if (m_rebalance_cb) {
|
||||||
|
m_rebalance_cb->dispatcher.Activate();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m_offset_commit_cb) {
|
||||||
|
m_offset_commit_cb->dispatcher.Activate();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Conf::stop() {
|
||||||
|
if (m_rebalance_cb) {
|
||||||
|
m_rebalance_cb->dispatcher.Deactivate();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m_offset_commit_cb) {
|
||||||
|
m_offset_commit_cb->dispatcher.Deactivate();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Destructor: release any callback wrappers this Conf allocated in
 * ConfigureCallback().
 */
Conf::~Conf() {
  if (m_rebalance_cb) {
    delete m_rebalance_cb;
  }
  // FIX: m_offset_commit_cb was allocated with new in ConfigureCallback()
  // but never freed, leaking the wrapper on every Conf teardown.
  if (m_offset_commit_cb) {
    delete m_offset_commit_cb;
  }
}
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
43
src/config.h
Normal file
43
src/config.h
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
/*
 * node-rdkafka - Node.js wrapper for RdKafka C/C++ library
 *
 * Copyright (c) 2016 Blizzard Entertainment
 *
 * This software may be modified and distributed under the terms
 * of the MIT license. See the LICENSE.txt file for details.
 */

#ifndef SRC_CONFIG_H_
#define SRC_CONFIG_H_

#include <nan.h>
#include <iostream>
#include <vector>
#include <list>
#include <string>

#include "rdkafkacpp.h"
#include "src/common.h"
#include "src/callbacks.h"

namespace NodeKafka {

// RdKafka::Conf subclass that knows how to populate itself from a JS
// options object and how to bridge JS functions into librdkafka callbacks.
class Conf : public RdKafka::Conf {
 public:
  ~Conf();

  // Factory: build a Conf from a JS options object; NULL + errstr on error.
  static Conf* create(RdKafka::Conf::ConfType, v8::Local<v8::Object>, std::string &);  // NOLINT
  // Debug helper: print a key/value config dump to stdout.
  static void DumpConfig(std::list<std::string> *);

  // Start/stop delivery of callback events to JS.
  void listen();
  void stop();

  // Register (add=true) or unregister (add=false) a JS callback function.
  void ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add, std::string &errstr);
 protected:
  NodeKafka::Callbacks::Rebalance * m_rebalance_cb = NULL;
  NodeKafka::Callbacks::OffsetCommit * m_offset_commit_cb = NULL;
};

}  // namespace NodeKafka

#endif  // SRC_CONFIG_H_
|
423
src/connection.cc
Normal file
423
src/connection.cc
Normal file
@ -0,0 +1,423 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "src/connection.h"
|
||||||
|
#include "src/workers.h"
|
||||||
|
|
||||||
|
using RdKafka::Conf;
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Connection v8 wrapped object.
|
||||||
|
*
|
||||||
|
* Wraps the RdKafka::Handle object with compositional inheritence and
|
||||||
|
* provides sensible defaults for exposing callbacks to node
|
||||||
|
*
|
||||||
|
* This object can't itself expose methods to the prototype directly, as far
|
||||||
|
* as I can tell. But it can provide the NAN_METHODS that just need to be added
|
||||||
|
* to the prototype. Since connections, etc. are managed differently based on
|
||||||
|
* whether it is a producer or consumer, they manage that. This base class
|
||||||
|
* handles some of the wrapping functionality and more importantly, the
|
||||||
|
* configuration of callbacks
|
||||||
|
*
|
||||||
|
* Any callback available to both consumers and producers, like logging or
|
||||||
|
* events will be handled in here.
|
||||||
|
*
|
||||||
|
* @sa RdKafka::Handle
|
||||||
|
* @sa NodeKafka::Client
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * Construct a not-yet-connected Connection: takes ownership of both config
 * objects, initializes the connection rwlock, and installs the shared event
 * callback on the global config.
 */
Connection::Connection(Conf* gconfig, Conf* tconfig):
  m_event_cb(),
  m_gconfig(gconfig),
  m_tconfig(tconfig) {
  m_client = NULL;
  m_is_closing = false;
  uv_rwlock_init(&m_connection_lock);

  // Install the event callback. A failure here is not reported; there is
  // nothing the caller could do about it anyway.
  std::string ignored_errstr;
  m_gconfig->set("event_cb", &m_event_cb, ignored_errstr);
}
|
||||||
|
|
||||||
|
// Tear down the rwlock and release the owned config objects.
// (delete on a NULL pointer is a no-op, so no guards are needed.)
Connection::~Connection() {
  uv_rwlock_destroy(&m_connection_lock);
  delete m_tconfig;
  delete m_gconfig;
}
|
||||||
|
|
||||||
|
// Build a TopicPartition with no specific partition assigned (PARTITION_UA).
RdKafka::TopicPartition* Connection::GetPartition(std::string &topic) {
  return RdKafka::TopicPartition::create(topic, RdKafka::Topic::PARTITION_UA);
}
|
||||||
|
|
||||||
|
// Build a TopicPartition for a specific partition of a topic.
RdKafka::TopicPartition* Connection::GetPartition(std::string &topic, int partition) {  // NOLINT
  return RdKafka::TopicPartition::create(topic, partition);
}
|
||||||
|
|
||||||
|
bool Connection::IsConnected() {
|
||||||
|
return !m_is_closing && m_client != NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Connection::IsClosing() {
|
||||||
|
return m_client != NULL && m_is_closing;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expose the raw librdkafka handle (may be NULL before connect).
RdKafka::Handle* Connection::GetClient() {
  return m_client;
}
|
||||||
|
|
||||||
|
// Convenience overload: create a topic handle with default topic config.
Baton Connection::CreateTopic(std::string topic_name) {
  return CreateTopic(topic_name, NULL);
}
|
||||||
|
|
||||||
|
/**
 * Create an RdKafka::Topic handle on the live client.
 *
 * @returns Baton wrapping the topic on success; ERR__STATE when not
 *          connected; ERR_TOPIC_EXCEPTION when creation fails.
 */
Baton Connection::CreateTopic(std::string topic_name, RdKafka::Conf* conf) {
  if (!IsConnected()) {
    return Baton(RdKafka::ErrorCode::ERR__STATE);
  }

  std::string errstr;
  RdKafka::Topic* topic = NULL;
  {
    // Re-check under the read lock: the connection may be torn down
    // between the unlocked check above and acquiring the lock.
    scoped_shared_read_lock lock(m_connection_lock);
    if (!IsConnected()) {
      return Baton(RdKafka::ErrorCode::ERR__STATE);
    }
    topic = RdKafka::Topic::create(m_client, topic_name, conf, errstr);
  }

  if (!errstr.empty()) {
    return Baton(RdKafka::ErrorCode::ERR_TOPIC_EXCEPTION, errstr);
  }

  return Baton(topic);
}
|
||||||
|
|
||||||
|
/**
 * Query the broker for the low/high watermark offsets of a partition.
 *
 * @param low_offset/high_offset  out-parameters filled on success
 * @returns Baton with the librdkafka error code (ERR__STATE if not
 *          connected).
 */
Baton Connection::QueryWatermarkOffsets(
  std::string topic_name, int32_t partition,
  int64_t* low_offset, int64_t* high_offset,
  int timeout_ms) {
  // Default to "not connected"; only overwritten if the call is made.
  RdKafka::ErrorCode err = RdKafka::ERR__STATE;

  if (IsConnected()) {
    scoped_shared_read_lock lock(m_connection_lock);
    if (IsConnected()) {
      err = m_client->query_watermark_offsets(topic_name, partition,
        low_offset, high_offset, timeout_ms);
    }
  }

  return Baton(err);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Look up the offsets for the given partitions by timestamp.
|
||||||
|
*
|
||||||
|
* The returned offset for each partition is the earliest offset whose
|
||||||
|
* timestamp is greater than or equal to the given timestamp in the
|
||||||
|
* corresponding partition.
|
||||||
|
*
|
||||||
|
* @returns A baton specifying the error state. If there was no error,
|
||||||
|
* there still may be an error on a topic partition basis.
|
||||||
|
*/
|
||||||
|
Baton Connection::OffsetsForTimes(
|
||||||
|
std::vector<RdKafka::TopicPartition*> &toppars,
|
||||||
|
int timeout_ms) {
|
||||||
|
// Check if we are connected first
|
||||||
|
|
||||||
|
RdKafka::ErrorCode err;
|
||||||
|
|
||||||
|
if (IsConnected()) {
|
||||||
|
scoped_shared_read_lock lock(m_connection_lock);
|
||||||
|
if (IsConnected()) {
|
||||||
|
// Always send true - we
|
||||||
|
err = m_client->offsetsForTimes(toppars, timeout_ms);
|
||||||
|
|
||||||
|
} else {
|
||||||
|
err = RdKafka::ERR__STATE;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err = RdKafka::ERR__STATE;
|
||||||
|
}
|
||||||
|
|
||||||
|
return Baton(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Fetch cluster metadata from the broker.
 *
 * When topic_name is non-empty, a temporary topic handle is created so the
 * request can be scoped to that topic; the handle is destroyed before
 * returning.
 *
 * @param all_topics  request metadata for all topics, not just topic_name
 * @param topic_name  topic to scope the request to ("" for none)
 * @param timeout_ms  broker request timeout
 * @returns Baton wrapping an RdKafka::Metadata* the caller must delete,
 *          or the error code on failure.
 */
Baton Connection::GetMetadata(
  bool all_topics, std::string topic_name, int timeout_ms) {
  RdKafka::Topic* topic = NULL;
  RdKafka::ErrorCode err;

  if (!topic_name.empty()) {
    Baton b = CreateTopic(topic_name);
    if (b.err() == RdKafka::ErrorCode::ERR_NO_ERROR) {
      topic = b.data<RdKafka::Topic*>();
    }
  }

  RdKafka::Metadata* metadata = NULL;

  if (IsConnected()) {
    scoped_shared_read_lock lock(m_connection_lock);
    if (IsConnected()) {
      err = m_client->metadata(all_topics, topic, &metadata, timeout_ms);
    } else {
      err = RdKafka::ERR__STATE;
    }
  } else {
    err = RdKafka::ERR__STATE;
  }

  // FIX: the temporary topic handle was previously never freed, leaking
  // one RdKafka::Topic per targeted metadata request.
  if (topic != NULL) {
    delete topic;
  }

  if (err == RdKafka::ERR_NO_ERROR) {
    return Baton(metadata);
  } else {
    // metadata is not set here
    // @see https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860
    return Baton(err);
  }
}
|
||||||
|
|
||||||
|
// Register or unregister a JS function for the connection-level "event_cb".
// All other keys are handled by the config objects, not here.
void Connection::ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add) {
  if (string_key.compare("event_cb") != 0) {
    return;
  }
  if (add) {
    this->m_event_cb.dispatcher.AddCallback(cb);
  } else {
    this->m_event_cb.dispatcher.RemoveCallback(cb);
  }
}
|
||||||
|
|
||||||
|
// NAN METHODS
|
||||||
|
|
||||||
|
/**
 * JS binding: getMetadata({topic?, allTopics?, timeout?}, callback).
 * Queues an async metadata request; the callback receives the result.
 */
NAN_METHOD(Connection::NodeGetMetadata) {
  Nan::HandleScope scope;

  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());

  // An omitted/invalid options argument is treated as an empty object.
  v8::Local<v8::Object> config = info[0]->IsObject()
    ? info[0].As<v8::Object>()
    : Nan::New<v8::Object>();

  if (!info[1]->IsFunction()) {
    Nan::ThrowError("Second parameter must be a callback");
    return;
  }
  v8::Local<v8::Function> cb = info[1].As<v8::Function>();

  std::string topic = GetParameter<std::string>(config, "topic", "");
  bool allTopics = GetParameter<bool>(config, "allTopics", true);
  int timeout_ms = GetParameter<int64_t>(config, "timeout", 30000);

  Nan::Callback *callback = new Nan::Callback(cb);

  Nan::AsyncQueueWorker(new Workers::ConnectionMetadata(
    callback, obj, topic, timeout_ms, allTopics));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * JS binding: offsetsForTimes(topicPartitions, timeout, callback).
 * Queues an async lookup of offsets by timestamp.
 */
NAN_METHOD(Connection::NodeOffsetsForTimes) {
  Nan::HandleScope scope;

  if (info.Length() < 3 || !info[0]->IsArray()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify an array of topic partitions");
  }

  std::vector<RdKafka::TopicPartition *> toppars =
    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());

  // Fall back to a 1 second timeout when the argument is not numeric.
  Nan::Maybe<uint32_t> maybeTimeout =
    Nan::To<uint32_t>(info[1].As<v8::Number>());
  int timeout_ms = maybeTimeout.IsNothing()
    ? 1000
    : static_cast<int>(maybeTimeout.FromJust());

  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);

  Connection* handle = ObjectWrap::Unwrap<Connection>(info.This());

  Nan::AsyncQueueWorker(
    new Workers::Handle::OffsetsForTimes(callback, handle,
      toppars, timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * JS binding: queryWatermarkOffsets(topic, partition, timeout, callback).
 * Validates the four arguments, then queues the async broker query.
 */
NAN_METHOD(Connection::NodeQueryWatermarkOffsets) {
  Nan::HandleScope scope;

  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());

  // Argument validation, one guard per position.
  if (!info[0]->IsString()) {
    Nan::ThrowError("1st parameter must be a topic string");
    return;
  }
  if (!info[1]->IsNumber()) {
    Nan::ThrowError("2nd parameter must be a partition number");
    return;
  }
  if (!info[2]->IsNumber()) {
    Nan::ThrowError("3rd parameter must be a number of milliseconds");
    return;
  }
  if (!info[3]->IsFunction()) {
    Nan::ThrowError("4th parameter must be a callback");
    return;
  }

  // Topic name.
  Nan::Utf8String topicUTF8(Nan::To<v8::String>(info[0]).ToLocalChecked());
  std::string topic_name(*topicUTF8);

  // Partition and timeout.
  int32_t partition = Nan::To<int32_t>(info[1]).FromJust();
  int timeout_ms = Nan::To<int>(info[2]).FromJust();

  // Callback.
  v8::Local<v8::Function> cb = info[3].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);

  Nan::AsyncQueueWorker(new Workers::ConnectionQueryWatermarkOffsets(
    callback, obj, topic_name, partition, timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
// Node methods
|
||||||
|
/**
 * @brief JS entry point: attach or detach event callbacks on this connection.
 *
 * Expected JS signature: configureCallbacks(add: boolean, callbacks: object).
 * `callbacks` is an object whose top-level keys select a configuration scope
 * ("global", "topic" or "event"); any other top-level key is silently ignored.
 * Each scope maps callback names to functions; non-string keys and
 * non-function values are skipped without error.
 *
 * Throws a JS error if the arguments are missing/mistyped, or if the
 * underlying Conf::ConfigureCallback reports a problem via errstr.
 * Returns `true` on success.
 */
NAN_METHOD(Connection::NodeConfigureCallbacks) {
  Nan::HandleScope scope;

  if (info.Length() < 2 ||
    !info[0]->IsBoolean() ||
    !info[1]->IsObject()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a callbacks object");
  }
  v8::Local<v8::Context> context = Nan::GetCurrentContext();
  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());

  // First argument: true to add the callbacks, false to remove them.
  const bool add = Nan::To<bool>(info[0]).ToChecked();
  v8::Local<v8::Object> configs_object = info[1]->ToObject(context).ToLocalChecked();
  v8::Local<v8::Array> configs_property_names = configs_object->GetOwnPropertyNames(context).ToLocalChecked();

  // Outer loop: walk the scope keys ("global"/"topic"/"event").
  for (unsigned int j = 0; j < configs_property_names->Length(); ++j) {
    std::string configs_string_key;

    v8::Local<v8::Value> configs_key = Nan::Get(configs_property_names, j).ToLocalChecked();
    v8::Local<v8::Value> configs_value = Nan::Get(configs_object, configs_key).ToLocalChecked();

    // 0 = unrecognized, 1 = global conf, 2 = topic conf, 3 = event callbacks.
    int config_type = 0;
    if (configs_value->IsObject() && configs_key->IsString()) {
      Nan::Utf8String configs_utf8_key(configs_key);
      configs_string_key = std::string(*configs_utf8_key);
      if (configs_string_key.compare("global") == 0) {
        config_type = 1;
      } else if (configs_string_key.compare("topic") == 0) {
        config_type = 2;
      } else if (configs_string_key.compare("event") == 0) {
        config_type = 3;
      } else {
        // Unknown scope name: ignore rather than fail.
        continue;
      }
    } else {
      continue;
    }

    v8::Local<v8::Object> object = configs_value->ToObject(context).ToLocalChecked();
    v8::Local<v8::Array> property_names = object->GetOwnPropertyNames(context).ToLocalChecked();

    // Inner loop: walk the callback-name -> function entries of this scope.
    for (unsigned int i = 0; i < property_names->Length(); ++i) {
      std::string errstr;
      std::string string_key;

      v8::Local<v8::Value> key = Nan::Get(property_names, i).ToLocalChecked();
      v8::Local<v8::Value> value = Nan::Get(object, key).ToLocalChecked();

      if (key->IsString()) {
        Nan::Utf8String utf8_key(key);
        string_key = std::string(*utf8_key);
      } else {
        continue;
      }

      if (value->IsFunction()) {
        v8::Local<v8::Function> cb = value.As<v8::Function>();
        switch (config_type) {
          case 1:
            // Global (rdkafka "global" conf) callback.
            obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr);
            if (!errstr.empty()) {
              return Nan::ThrowError(errstr.c_str());
            }
            break;
          case 2:
            // Topic conf callback.
            obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr);
            if (!errstr.empty()) {
              return Nan::ThrowError(errstr.c_str());
            }
            break;
          case 3:
            // Connection-level event callback (no errstr channel here).
            obj->ConfigureCallback(string_key, cb, add);
            break;
        }
      }
    }
  }

  info.GetReturnValue().Set(Nan::True());
}
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
97
src/connection.h
Normal file
97
src/connection.h
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_CONNECTION_H_
|
||||||
|
#define SRC_CONNECTION_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/errors.h"
|
||||||
|
#include "src/config.h"
|
||||||
|
#include "src/callbacks.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Connection v8 wrapped object.
|
||||||
|
*
|
||||||
|
* Wraps the RdKafka::Handle object with compositional inheritence and
|
||||||
|
* provides sensible defaults for exposing callbacks to node
|
||||||
|
*
|
||||||
|
* This object can't itself expose methods to the prototype directly, as far
|
||||||
|
* as I can tell. But it can provide the NAN_METHODS that just need to be added
|
||||||
|
* to the prototype. Since connections, etc. are managed differently based on
|
||||||
|
* whether it is a producer or consumer, they manage that. This base class
|
||||||
|
* handles some of the wrapping functionality and more importantly, the
|
||||||
|
* configuration of callbacks
|
||||||
|
*
|
||||||
|
* Any callback available to both consumers and producers, like logging or
|
||||||
|
* events will be handled in here.
|
||||||
|
*
|
||||||
|
* @sa RdKafka::Handle
|
||||||
|
* @sa NodeKafka::Client
|
||||||
|
*/
|
||||||
|
|
||||||
|
class Connection : public Nan::ObjectWrap {
 public:
  // Connection state queries.
  bool IsConnected();
  bool IsClosing();

  // Baton<RdKafka::Topic*>
  Baton CreateTopic(std::string);
  Baton CreateTopic(std::string, RdKafka::Conf*);
  // Broker metadata / offset lookups; each returns a Baton carrying the
  // RdKafka error code (and payload where applicable).
  Baton GetMetadata(bool, std::string, int);
  Baton QueryWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*, int);
  Baton OffsetsForTimes(std::vector<RdKafka::TopicPartition*> &, int);

  // Raw librdkafka handle (producer or consumer, depending on subclass).
  RdKafka::Handle* GetClient();

  // Helpers to build TopicPartition objects from a topic name
  // (optionally with an explicit partition).
  static RdKafka::TopicPartition* GetPartition(std::string &);
  static RdKafka::TopicPartition* GetPartition(std::string &, int);

  // Event callback shared by producers and consumers.
  Callbacks::Event m_event_cb;

  // Subclasses start/stop their libuv dispatchers here.
  virtual void ActivateDispatchers() = 0;
  virtual void DeactivateDispatchers() = 0;

  // Add (add=true) or remove (add=false) a named JS callback.
  virtual void ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add);

 protected:
  Connection(Conf*, Conf*);
  ~Connection();

  static Nan::Persistent<v8::Function> constructor;
  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);

  bool m_has_been_disconnected;
  bool m_is_closing;

  // Global and default-topic configuration objects (owned here).
  Conf* m_gconfig;
  Conf* m_tconfig;
  std::string m_errstr;

  // Guards m_client across connect/disconnect and produce/consume paths.
  uv_rwlock_t m_connection_lock;

  // Underlying librdkafka handle; NULL while disconnected.
  RdKafka::Handle* m_client;

  // NAN_METHODs subclasses attach to their prototypes.
  static NAN_METHOD(NodeConfigureCallbacks);
  static NAN_METHOD(NodeGetMetadata);
  static NAN_METHOD(NodeQueryWatermarkOffsets);
  static NAN_METHOD(NodeOffsetsForTimes);
};
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_CONNECTION_H_
|
96
src/errors.cc
Normal file
96
src/errors.cc
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "src/errors.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * @brief Build a plain JS object describing an RdKafka error.
 *
 * The result has a `message` string property and a numeric `code` property.
 */
v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err, std::string errstr) { // NOLINT
  v8::Local<v8::Object> error_object = Nan::New<v8::Object>();

  Nan::Set(error_object, Nan::New("message").ToLocalChecked(),
    Nan::New<v8::String>(errstr).ToLocalChecked());
  Nan::Set(error_object, Nan::New("code").ToLocalChecked(),
    Nan::New<v8::Number>(static_cast<int>(err)));

  return error_object;
}
|
||||||
|
|
||||||
|
// Convenience overload: use librdkafka's canonical message for `err`.
v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err) {
  return RdKafkaError(err, RdKafka::err2str(err));
}
|
||||||
|
|
||||||
|
/**
 * @brief Build a JS error object including transactional error flags.
 *
 * Extends the basic {message, code} object with `isFatal`, `isRetriable`
 * and `isTxnRequiresAbort` booleans, mirroring RdKafka::Error.
 */
v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err, std::string errstr,
  bool isFatal, bool isRetriable, bool isTxnRequiresAbort) {
  // Start from the basic error shape, then layer the txn flags on top.
  v8::Local<v8::Object> error_object = RdKafkaError(err, errstr);

  Nan::Set(error_object, Nan::New("isFatal").ToLocalChecked(),
    Nan::New<v8::Boolean>(isFatal));
  Nan::Set(error_object, Nan::New("isRetriable").ToLocalChecked(),
    Nan::New<v8::Boolean>(isRetriable));
  Nan::Set(error_object, Nan::New("isTxnRequiresAbort").ToLocalChecked(),
    Nan::New<v8::Boolean>(isTxnRequiresAbort));

  return error_object;
}
|
||||||
|
|
||||||
|
// Error-only baton. Initializes every member: previously m_data and the
// transactional flags were left indeterminate, so a later data<T>() or
// ToTxnObject() on an error baton read uninitialized memory.
Baton::Baton(const RdKafka::ErrorCode &code) {
  m_err = code;
  m_data = NULL;
  m_isFatal = false;
  m_isRetriable = false;
  m_isTxnRequiresAbort = false;
}

// Error with a custom message.
Baton::Baton(const RdKafka::ErrorCode &code, std::string errstr) {
  m_err = code;
  m_errstr = errstr;
  m_data = NULL;
  m_isFatal = false;
  m_isRetriable = false;
  m_isTxnRequiresAbort = false;
}

// Success baton carrying an opaque payload (retrieved via data<T>()).
Baton::Baton(void* data) {
  m_err = RdKafka::ERR_NO_ERROR;
  m_data = data;
  m_isFatal = false;
  m_isRetriable = false;
  m_isTxnRequiresAbort = false;
}

// Transactional error baton with the full RdKafka::Error flag set.
Baton::Baton(const RdKafka::ErrorCode &code, std::string errstr, bool isFatal,
  bool isRetriable, bool isTxnRequiresAbort) {
  m_err = code;
  m_errstr = errstr;
  m_data = NULL;
  m_isFatal = isFatal;
  m_isRetriable = isRetriable;
  m_isTxnRequiresAbort = isTxnRequiresAbort;
}
|
||||||
|
|
||||||
|
|
||||||
|
// Render this baton as a JS error object. Falls back to librdkafka's
// canonical message when no custom message was captured.
v8::Local<v8::Object> Baton::ToObject() {
  return m_errstr.empty() ? RdKafkaError(m_err) : RdKafkaError(m_err, m_errstr);
}
|
||||||
|
|
||||||
|
// Render this baton as a JS error object including transactional flags.
v8::Local<v8::Object> Baton::ToTxnObject() {
  return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort);
}
|
||||||
|
|
||||||
|
// Accessor for the raw RdKafka error code.
RdKafka::ErrorCode Baton::err() {
  return m_err;
}
|
||||||
|
|
||||||
|
std::string Baton::errstr() {
|
||||||
|
if (m_errstr.empty()) {
|
||||||
|
return RdKafka::err2str(m_err);
|
||||||
|
} else {
|
||||||
|
return m_errstr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
54
src/errors.h
Normal file
54
src/errors.h
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_ERRORS_H_
|
||||||
|
#define SRC_ERRORS_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
class Baton {
|
||||||
|
public:
|
||||||
|
explicit Baton(const RdKafka::ErrorCode &);
|
||||||
|
explicit Baton(void* data);
|
||||||
|
explicit Baton(const RdKafka::ErrorCode &, std::string);
|
||||||
|
explicit Baton(const RdKafka::ErrorCode &, std::string, bool isFatal,
|
||||||
|
bool isRetriable, bool isTxnRequiresAbort);
|
||||||
|
|
||||||
|
template<typename T> T data() {
|
||||||
|
return static_cast<T>(m_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
RdKafka::ErrorCode err();
|
||||||
|
std::string errstr();
|
||||||
|
|
||||||
|
v8::Local<v8::Object> ToObject();
|
||||||
|
v8::Local<v8::Object> ToTxnObject();
|
||||||
|
|
||||||
|
private:
|
||||||
|
void* m_data;
|
||||||
|
std::string m_errstr;
|
||||||
|
RdKafka::ErrorCode m_err;
|
||||||
|
bool m_isFatal;
|
||||||
|
bool m_isRetriable;
|
||||||
|
bool m_isTxnRequiresAbort;
|
||||||
|
};
|
||||||
|
|
||||||
|
v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &);
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_ERRORS_H_
|
1229
src/kafka-consumer.cc
Normal file
1229
src/kafka-consumer.cc
Normal file
File diff suppressed because it is too large
Load Diff
126
src/kafka-consumer.h
Normal file
126
src/kafka-consumer.h
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_KAFKA_CONSUMER_H_
|
||||||
|
#define SRC_KAFKA_CONSUMER_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <uv.h>
|
||||||
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/connection.h"
|
||||||
|
#include "src/callbacks.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief KafkaConsumer v8 wrapped object.
|
||||||
|
*
|
||||||
|
* Specializes the connection to wrap a consumer object through compositional
|
||||||
|
* inheritence. Establishes its prototype in node through `Init`
|
||||||
|
*
|
||||||
|
* @sa RdKafka::Handle
|
||||||
|
* @sa NodeKafka::Client
|
||||||
|
*/
|
||||||
|
|
||||||
|
class KafkaConsumer : public Connection {
  // Producer needs direct access (e.g. for consumer group metadata in
  // transactional sends).
  friend class Producer;
 public:
  static void Init(v8::Local<v8::Object>);
  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);

  // Connection lifecycle.
  Baton Connect();
  Baton Disconnect();

  // Subscription management.
  Baton Subscription();
  Baton Unsubscribe();
  bool IsSubscribed();

  Baton Pause(std::vector<RdKafka::TopicPartition*> &);
  Baton Resume(std::vector<RdKafka::TopicPartition*> &);

  // Asynchronous commit events
  Baton Commit(std::vector<RdKafka::TopicPartition*>);
  Baton Commit(RdKafka::TopicPartition*);
  Baton Commit();

  Baton OffsetsStore(std::vector<RdKafka::TopicPartition*> &);
  Baton GetWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*);

  // Synchronous commit events
  Baton CommitSync(std::vector<RdKafka::TopicPartition*>);
  Baton CommitSync(RdKafka::TopicPartition*);
  Baton CommitSync();

  Baton Committed(std::vector<RdKafka::TopicPartition*> &, int timeout_ms);
  Baton Position(std::vector<RdKafka::TopicPartition*> &);

  // Re-fetch the current assignment from librdkafka into m_partitions.
  Baton RefreshAssignments();

  // Manual assignment state.
  bool HasAssignedPartitions();
  int AssignedPartitionCount();

  Baton Assign(std::vector<RdKafka::TopicPartition*>);
  Baton Unassign();

  Baton Seek(const RdKafka::TopicPartition &partition, int timeout_ms);

  std::string Name();

  Baton Subscribe(std::vector<std::string>);
  Baton Consume(int timeout_ms);

  // Dispatcher lifecycle (see Connection).
  void ActivateDispatchers();
  void DeactivateDispatchers();

 protected:
  static Nan::Persistent<v8::Function> constructor;
  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);

  KafkaConsumer(Conf *, Conf *);
  ~KafkaConsumer();

 private:
  // Debug helper: print a partition list.
  static void part_list_print(const std::vector<RdKafka::TopicPartition*>&);

  // Cached view of the current assignment.
  std::vector<RdKafka::TopicPartition*> m_partitions;
  int m_partition_cnt;
  bool m_is_subscribed = false;

  // Node methods
  static NAN_METHOD(NodeConnect);
  static NAN_METHOD(NodeSubscribe);
  static NAN_METHOD(NodeDisconnect);
  static NAN_METHOD(NodeAssign);
  static NAN_METHOD(NodeUnassign);
  static NAN_METHOD(NodeAssignments);
  static NAN_METHOD(NodeUnsubscribe);
  static NAN_METHOD(NodeCommit);
  static NAN_METHOD(NodeCommitSync);
  static NAN_METHOD(NodeOffsetsStore);
  static NAN_METHOD(NodeCommitted);
  static NAN_METHOD(NodePosition);
  static NAN_METHOD(NodeSubscription);
  static NAN_METHOD(NodeSeek);
  static NAN_METHOD(NodeGetWatermarkOffsets);
  static NAN_METHOD(NodeConsumeLoop);
  static NAN_METHOD(NodeConsume);

  static NAN_METHOD(NodePause);
  static NAN_METHOD(NodeResume);
};
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_KAFKA_CONSUMER_H_
|
849
src/producer.cc
Normal file
849
src/producer.cc
Normal file
@ -0,0 +1,849 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "src/producer.h"
|
||||||
|
#include "src/kafka-consumer.h"
|
||||||
|
#include "src/workers.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Producer v8 wrapped object.
|
||||||
|
*
|
||||||
|
* Wraps the RdKafka::Producer object with compositional inheritence and
|
||||||
|
* provides methods for interacting with it exposed to node.
|
||||||
|
*
|
||||||
|
* The base wrappable RdKafka::Handle deals with most of the wrapping but
|
||||||
|
* we still need to declare its prototype.
|
||||||
|
*
|
||||||
|
* @sa RdKafka::Producer
|
||||||
|
* @sa NodeKafka::Connection
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Construct the producer wrapper: wires the delivery-report callback and
// default topic configuration into the global conf before connecting.
Producer::Producer(Conf* gconfig, Conf* tconfig):
  Connection(gconfig, tconfig),
  m_dr_cb(),
  m_partitioner_cb() {
  std::string errstr;

  // NOTE(review): errstr from both set() calls is ignored here — a failure
  // to register default_topic_conf/dr_cb would go unnoticed. Confirm
  // whether these calls can fail for valid configs.
  m_gconfig->set("default_topic_conf", m_tconfig, errstr);
  m_gconfig->set("dr_cb", &m_dr_cb, errstr);
}
|
||||||
|
|
||||||
|
// Ensure the librdkafka handle is torn down with the wrapper.
Producer::~Producer() {
  Disconnect();
}
|
||||||
|
|
||||||
|
Nan::Persistent<v8::Function> Producer::constructor;
|
||||||
|
|
||||||
|
/**
 * @brief Register the Producer class and its prototype methods on `exports`.
 *
 * Builds the v8 function template, attaches all NAN_METHODs (lifecycle,
 * production, and transaction APIs), caches the constructor, and exposes
 * the class to JS as `Producer`.
 */
void Producer::Init(v8::Local<v8::Object> exports) {
  Nan::HandleScope scope;

  v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);
  tpl->SetClassName(Nan::New("Producer").ToLocalChecked());
  // One internal field to hold the wrapped native pointer.
  tpl->InstanceTemplate()->SetInternalFieldCount(1);

  /*
   * Lifecycle events inherited from NodeKafka::Connection
   *
   * @sa NodeKafka::Connection
   */

  Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks);

  /*
   * @brief Methods to do with establishing state
   */

  Nan::SetPrototypeMethod(tpl, "connect", NodeConnect);
  Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect);
  Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata);
  Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT
  Nan::SetPrototypeMethod(tpl, "poll", NodePoll);

  /*
   * @brief Methods exposed to do with message production
   */

  Nan::SetPrototypeMethod(tpl, "setPartitioner", NodeSetPartitioner);
  Nan::SetPrototypeMethod(tpl, "produce", NodeProduce);

  Nan::SetPrototypeMethod(tpl, "flush", NodeFlush);

  /*
   * @brief Methods exposed to do with transactions
   */

  Nan::SetPrototypeMethod(tpl, "initTransactions", NodeInitTransactions);
  Nan::SetPrototypeMethod(tpl, "beginTransaction", NodeBeginTransaction);
  Nan::SetPrototypeMethod(tpl, "commitTransaction", NodeCommitTransaction);
  Nan::SetPrototypeMethod(tpl, "abortTransaction", NodeAbortTransaction);
  Nan::SetPrototypeMethod(tpl, "sendOffsetsToTransaction", NodeSendOffsetsToTransaction);

  // connect. disconnect. resume. pause. get meta data
  constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext()))
    .ToLocalChecked());

  Nan::Set(exports, Nan::New("Producer").ToLocalChecked(),
    tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked());
}
|
||||||
|
|
||||||
|
/**
 * @brief JS constructor: new Producer(globalConf, topicConf).
 *
 * Validates both config arguments, builds the native Conf objects, and
 * wraps a new Producer around `this`. Throws a JS error (and frees any
 * already-built conf) on invalid input or conf-creation failure.
 */
void Producer::New(const Nan::FunctionCallbackInfo<v8::Value>& info) {
  if (!info.IsConstructCall()) {
    return Nan::ThrowError("non-constructor invocation not supported");
  }

  if (info.Length() < 2) {
    return Nan::ThrowError("You must supply global and topic configuration");
  }

  if (!info[0]->IsObject()) {
    return Nan::ThrowError("Global configuration data must be specified");
  }

  if (!info[1]->IsObject()) {
    return Nan::ThrowError("Topic configuration must be specified");
  }

  std::string errstr;

  Conf* gconfig =
    Conf::create(RdKafka::Conf::CONF_GLOBAL,
      (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr);

  if (!gconfig) {
    return Nan::ThrowError(errstr.c_str());
  }

  Conf* tconfig =
    Conf::create(RdKafka::Conf::CONF_TOPIC,
      (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr);

  if (!tconfig) {
    // No longer need this since we aren't instantiating anything
    delete gconfig;
    return Nan::ThrowError(errstr.c_str());
  }

  // Ownership of both confs transfers to the Producer.
  Producer* producer = new Producer(gconfig, tconfig);

  // Wrap it
  producer->Wrap(info.This());

  // Then there is some weird initialization that happens
  // basically it sets the configuration data
  // we don't need to do that because we lazy load it

  info.GetReturnValue().Set(info.This());
}
|
||||||
|
|
||||||
|
// Instantiate a Producer from C++ by invoking the cached JS constructor
// with a single argument.
v8::Local<v8::Object> Producer::NewInstance(v8::Local<v8::Value> arg) {
  Nan::EscapableHandleScope scope;

  v8::Local<v8::Value> ctor_args[1] = { arg };
  v8::Local<v8::Function> ctor = Nan::New<v8::Function>(constructor);

  v8::Local<v8::Object> wrapped =
    Nan::NewInstance(ctor, 1, ctor_args).ToLocalChecked();

  // Escape so the handle survives this scope.
  return scope.Escape(wrapped);
}
|
||||||
|
|
||||||
|
|
||||||
|
std::string Producer::Name() {
|
||||||
|
if (!IsConnected()) {
|
||||||
|
return std::string("");
|
||||||
|
}
|
||||||
|
return std::string(m_client->name());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * @brief Create the underlying RdKafka::Producer handle.
 *
 * Idempotent: returns ERR_NO_ERROR immediately if already connected.
 * On creation failure returns ERR__STATE with librdkafka's errstr.
 */
Baton Producer::Connect() {
  if (IsConnected()) {
    return Baton(RdKafka::ERR_NO_ERROR);
  }

  std::string errstr;
  {
    // NOTE(review): m_client is *written* here while holding only a shared
    // read lock — confirm whether a write lock is intended, as Disconnect()
    // takes the write lock for the same field.
    scoped_shared_read_lock lock(m_connection_lock);
    m_client = RdKafka::Producer::create(m_gconfig, errstr);
  }

  if (!m_client) {
    // @todo implement errstr into this somehow
    return Baton(RdKafka::ERR__STATE, errstr);
  }

  return Baton(RdKafka::ERR_NO_ERROR);
}
|
||||||
|
|
||||||
|
// Start the libuv dispatchers that forward native callbacks to JS.
void Producer::ActivateDispatchers() {
  m_event_cb.dispatcher.Activate();  // From connection
  m_dr_cb.dispatcher.Activate();     // Delivery reports
}
|
||||||
|
|
||||||
|
// Stop the libuv dispatchers; mirrors ActivateDispatchers().
void Producer::DeactivateDispatchers() {
  m_event_cb.dispatcher.Deactivate();  // From connection
  m_dr_cb.dispatcher.Deactivate();     // Delivery reports
}
|
||||||
|
|
||||||
|
// Destroy the librdkafka handle (if any) under the write lock and mark
// the connection as gone. Safe to call when already disconnected.
void Producer::Disconnect() {
  if (IsConnected()) {
    scoped_shared_write_lock lock(m_connection_lock);
    delete m_client;
    m_client = NULL;
  }
}
|
||||||
|
|
||||||
|
/**
 * @brief Enqueue a message using a pre-built RdKafka::Topic handle.
 *
 * @param message - pointer to the message we are sending. This method will
 * create a copy of it (RK_MSG_COPY), so you are still required to free it
 * when done.
 * @param size - size of the message. We are copying the memory so we need
 * the size
 * @param topic - RdKafka::Topic* object to send the message to. Generally
 * created by NodeKafka::Topic::toRDKafkaTopic
 * @param partition - partition to send it to. Send in
 * RdKafka::Topic::PARTITION_UA to send to an unassigned topic
 * @param key - pointer to the key bytes, or null if there is none.
 * @param key_len - length of the key bytes.
 * @param opaque - caller-supplied pointer handed back in the delivery report.
 * @return - A baton object with error code set if it failed.
 */
Baton Producer::Produce(void* message, size_t size, RdKafka::Topic* topic,
  int32_t partition, const void *key, size_t key_len, void* opaque) {
  RdKafka::ErrorCode response_code;

  // Double-checked connection test: the cheap check outside the lock, then
  // re-verified once the shared lock is held.
  if (IsConnected()) {
    scoped_shared_read_lock lock(m_connection_lock);
    if (IsConnected()) {
      RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer*>(m_client);
      response_code = producer->produce(topic, partition,
        RdKafka::Producer::RK_MSG_COPY,
        message, size, key, key_len, opaque);
    } else {
      response_code = RdKafka::ERR__STATE;
    }
  } else {
    response_code = RdKafka::ERR__STATE;
  }

  // These topics actually link to the configuration
  // they are made from. It's so we can reuse topic configurations
  // That means if we delete it here and librd thinks its still linked,
  // producing to the same topic will try to reuse it and it will die.
  //
  // Honestly, we may need to make configuration a first class object
  // @todo(Conf needs to be a first class object that is passed around)
  // delete topic;

  if (response_code != RdKafka::ERR_NO_ERROR) {
    return Baton(response_code);
  }

  return Baton(RdKafka::ERR_NO_ERROR);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Producer::Produce description]
|
||||||
|
* @param message - pointer to the message we are sending. This method will
|
||||||
|
* create a copy of it, so you are still required to free it when done.
|
||||||
|
* @param size - size of the message. We are copying the memory so we need
|
||||||
|
* the size
|
||||||
|
* @param topic - String topic to use so we do not need to create
|
||||||
|
* an RdKafka::Topic*
|
||||||
|
* @param partition - partition to send it to. Send in
|
||||||
|
* RdKafka::Topic::PARTITION_UA to send to an unassigned topic
|
||||||
|
* @param key - a string pointer for the key, or null if there is none.
|
||||||
|
* @return - A baton object with error code set if it failed.
|
||||||
|
*/
|
||||||
|
Baton Producer::Produce(void* message, size_t size, std::string topic,
|
||||||
|
int32_t partition, std::string *key, int64_t timestamp, void* opaque,
|
||||||
|
RdKafka::Headers* headers) {
|
||||||
|
return Produce(message, size, topic, partition,
|
||||||
|
key ? key->data() : NULL, key ? key->size() : 0,
|
||||||
|
timestamp, opaque, headers);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * @brief Enqueue a message by topic name with raw key bytes, timestamp and
 * headers.
 *
 * @param message - pointer to the message we are sending. This method will
 * create a copy of it (RK_MSG_COPY), so you are still required to free it
 * when done.
 * @param size - size of the message. We are copying the memory so we need
 * the size
 * @param topic - String topic to use so we do not need to create
 * an RdKafka::Topic*
 * @param partition - partition to send it to. Send in
 * RdKafka::Topic::PARTITION_UA to send to an unassigned topic
 * @param key - pointer to the key bytes, or null if there is none.
 * @param key_len - length of the key bytes.
 * @param timestamp - message timestamp forwarded to librdkafka.
 * @param opaque - caller-supplied pointer handed back in the delivery report.
 * @param headers - message headers forwarded to librdkafka.
 * @return - A baton object with error code set if it failed.
 */
Baton Producer::Produce(void* message, size_t size, std::string topic,
  int32_t partition, const void *key, size_t key_len,
  int64_t timestamp, void* opaque, RdKafka::Headers* headers) {
  RdKafka::ErrorCode response_code;

  // Double-checked connection test under the shared lock, matching the
  // RdKafka::Topic* overload above.
  if (IsConnected()) {
    scoped_shared_read_lock lock(m_connection_lock);
    if (IsConnected()) {
      RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer*>(m_client);
      // This one is a bit different
      response_code = producer->produce(topic, partition,
        RdKafka::Producer::RK_MSG_COPY,
        message, size,
        key, key_len,
        timestamp, headers, opaque);
    } else {
      response_code = RdKafka::ERR__STATE;
    }
  } else {
    response_code = RdKafka::ERR__STATE;
  }

  // These topics actually link to the configuration
  // they are made from. It's so we can reuse topic configurations
  // That means if we delete it here and librd thinks its still linked,
  // producing to the same topic will try to reuse it and it will die.
  //
  // Honestly, we may need to make configuration a first class object
  // @todo(Conf needs to be a first class object that is passed around)
  // delete topic;

  if (response_code != RdKafka::ERR_NO_ERROR) {
    return Baton(response_code);
  }

  return Baton(RdKafka::ERR_NO_ERROR);
}
|
||||||
|
|
||||||
|
/**
 * @brief Serve librdkafka's queued callbacks (delivery reports, events)
 * without blocking (poll timeout 0).
 *
 * Guarded against a disconnected handle: m_client is NULL before Connect()
 * and after Disconnect(), and the previous unconditional m_client->poll(0)
 * dereferenced that null pointer.
 */
void Producer::Poll() {
  if (m_client) {
    m_client->poll(0);
  }
}
|
||||||
|
|
||||||
|
/**
 * @brief Add or remove a named producer callback.
 *
 * Handles "delivery_cb" locally (delivery-report dispatcher); every other
 * key is delegated to Connection::ConfigureCallback. When adding, a truthy
 * boolean `dr_msg_cb` property on the callback function opts in to having
 * the message payload buffer included in delivery reports.
 */
void Producer::ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add) {
  if (string_key.compare("delivery_cb") == 0) {
    if (add) {
      // Opt-in flag read off the callback function object itself.
      bool dr_msg_cb = false;
      v8::Local<v8::String> dr_msg_cb_key = Nan::New("dr_msg_cb").ToLocalChecked();
      if (Nan::Has(cb, dr_msg_cb_key).FromMaybe(false)) {
        v8::Local<v8::Value> v = Nan::Get(cb, dr_msg_cb_key).ToLocalChecked();
        if (v->IsBoolean()) {
          dr_msg_cb = Nan::To<bool>(v).ToChecked();
        }
      }
      if (dr_msg_cb) {
        this->m_dr_cb.SendMessageBuffer(true);
      }
      this->m_dr_cb.dispatcher.AddCallback(cb);
    } else {
      this->m_dr_cb.dispatcher.RemoveCallback(cb);
    }
  } else {
    // Shared callbacks (e.g. events) live on the base Connection.
    Connection::ConfigureCallback(string_key, cb, add);
  }
}
|
||||||
|
|
||||||
|
// Convert an owned RdKafka::Error* (possibly NULL, meaning success) into a
// Baton, freeing the error object in the process.
Baton rdkafkaErrorToBaton(RdKafka::Error* error) {
  if (error == NULL) {
    return Baton(RdKafka::ERR_NO_ERROR);
  }

  Baton baton(error->code(), error->str(), error->is_fatal(),
    error->is_retriable(), error->txn_requires_abort());
  // We own the error object returned by librdkafka's txn APIs.
  delete error;
  return baton;
}
|
||||||
|
|
||||||
|
// Initialize the transactional producer (requires transactional.id in the
// config). Returns ERR__STATE if not connected.
Baton Producer::InitTransactions(int32_t timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  RdKafka::Producer* handle = dynamic_cast<RdKafka::Producer*>(m_client);
  return rdkafkaErrorToBaton(handle->init_transactions(timeout_ms));
}
|
||||||
|
|
||||||
|
// Begin a transaction on the connected producer; InitTransactions() must
// have succeeded first. Returns ERR__STATE if not connected.
Baton Producer::BeginTransaction() {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer*>(m_client);
  RdKafka::Error* error = producer->begin_transaction();

  return rdkafkaErrorToBaton( error);
}
|
||||||
|
|
||||||
|
// Commit the current transaction, waiting up to timeout_ms.
// Returns ERR__STATE if not connected.
Baton Producer::CommitTransaction(int32_t timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer*>(m_client);
  RdKafka::Error* error = producer->commit_transaction(timeout_ms);

  return rdkafkaErrorToBaton( error);
}
|
||||||
|
|
||||||
|
// Abort the current transaction, waiting up to timeout_ms.
// Returns ERR__STATE if not connected.
Baton Producer::AbortTransaction(int32_t timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  RdKafka::Producer* handle = dynamic_cast<RdKafka::Producer*>(m_client);
  return rdkafkaErrorToBaton(handle->abort_transaction(timeout_ms));
}
|
||||||
|
|
||||||
|
/**
 * @brief Send consumed offsets to the ongoing transaction.
 *
 * Pulls the consumer group metadata from the given KafkaConsumer's handle
 * (accessible via friendship) and forwards it with the offsets to
 * librdkafka. The metadata object is freed after the call.
 * Returns ERR__STATE if this producer is not connected.
 *
 * NOTE(review): consumer->m_client is dereferenced without a NULL check —
 * confirm callers guarantee the consumer is connected.
 */
Baton Producer::SendOffsetsToTransaction(
  std::vector<RdKafka::TopicPartition*> &offsets,
  NodeKafka::KafkaConsumer* consumer,
  int timeout_ms) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  RdKafka::ConsumerGroupMetadata* group_metadata = dynamic_cast<RdKafka::KafkaConsumer*>(consumer->m_client)->groupMetadata();

  RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer*>(m_client);
  RdKafka::Error* error = producer->send_offsets_to_transaction(offsets, group_metadata, timeout_ms);
  delete group_metadata;

  return rdkafkaErrorToBaton( error);
}
|
||||||
|
|
||||||
|
/* Node exposed methods */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Producer::NodeProduce - produce a message through a producer
|
||||||
|
*
|
||||||
|
* This is a synchronous method. You may ask, "why?". The answer is because
|
||||||
|
* there is no true value doing this asynchronously. All it does is degrade
|
||||||
|
* performance. This method does not block - all it does is add a message
|
||||||
|
* to a queue. In the case where the queue is full, it will return an error
|
||||||
|
* immediately. The only way this method blocks is when you provide it a
|
||||||
|
* flag to do so, which we never do.
|
||||||
|
*
|
||||||
|
* Doing it asynchronously eats up the libuv threadpool for no reason and
|
||||||
|
* increases execution time by a very small amount. It will take two ticks of
|
||||||
|
* the event loop to execute at minimum - 1 for executing it and another for
|
||||||
|
* calling back the callback.
|
||||||
|
*
|
||||||
|
* @sa RdKafka::Producer::produce
|
||||||
|
*/
|
||||||
|
NAN_METHOD(Producer::NodeProduce) {
  Nan::HandleScope scope;

  // Need to extract the message data here.
  // Expected arguments: topic (string or Topic object), partition, message,
  // [key], [timestamp], [opaque], [headers].
  if (info.Length() < 3) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a topic, partition, and message");
  }

  // Second parameter is the partition
  int32_t partition;

  if (info[1]->IsNull() || info[1]->IsUndefined()) {
    partition = RdKafka::Topic::PARTITION_UA;
  } else {
    partition = Nan::To<int32_t>(info[1]).FromJust();
  }

  // Negative partitions are normalized to "unassigned" as well.
  if (partition < 0) {
    partition = RdKafka::Topic::PARTITION_UA;
  }

  size_t message_buffer_length;
  void* message_buffer_data;

  if (info[2]->IsNull()) {
    // This is okay for whatever reason - a null message produces an empty
    // payload.
    message_buffer_length = 0;
    message_buffer_data = NULL;
  } else if (!node::Buffer::HasInstance(info[2])) {
    return Nan::ThrowError("Message must be a buffer or null");
  } else {
    v8::Local<v8::Object> message_buffer_object =
      (info[2]->ToObject(Nan::GetCurrentContext())).ToLocalChecked();

    // v8 handles the garbage collection here so we need to make a copy of
    // the buffer or assign the buffer to a persistent handle.

    // I'm not sure which would be the more performant option. I assume
    // the persistent handle would be but for now we'll try this one
    // which should be more memory-efficient and allow v8 to dispose of the
    // buffer sooner

    message_buffer_length = node::Buffer::Length(message_buffer_object);
    message_buffer_data = node::Buffer::Data(message_buffer_object);
    if (message_buffer_data == NULL) {
      // empty string message buffer should not end up as null message;
      // substitute a zero-length buffer so the pointer is non-NULL.
      // NOTE(review): the temporary handle only lives until this scope
      // closes - assumes Produce() copies the payload; verify.
      v8::Local<v8::Object> message_buffer_object_emptystring = Nan::NewBuffer(new char[0], 0).ToLocalChecked();
      message_buffer_length = node::Buffer::Length(message_buffer_object_emptystring);
      message_buffer_data = node::Buffer::Data(message_buffer_object_emptystring);
    }
  }

  size_t key_buffer_length;
  const void* key_buffer_data;
  std::string * key = NULL;  // heap copy when the key arrives as a string

  if (info[3]->IsNull() || info[3]->IsUndefined()) {
    // This is okay for whatever reason - messages may be keyless.
    key_buffer_length = 0;
    key_buffer_data = NULL;
  } else if (node::Buffer::HasInstance(info[3])) {
    v8::Local<v8::Object> key_buffer_object =
      (info[3]->ToObject(Nan::GetCurrentContext())).ToLocalChecked();

    // v8 handles the garbage collection here so we need to make a copy of
    // the buffer or assign the buffer to a persistent handle.

    // I'm not sure which would be the more performant option. I assume
    // the persistent handle would be but for now we'll try this one
    // which should be more memory-efficient and allow v8 to dispose of the
    // buffer sooner

    key_buffer_length = node::Buffer::Length(key_buffer_object);
    key_buffer_data = node::Buffer::Data(key_buffer_object);
    if (key_buffer_data == NULL) {
      // empty string key buffer should not end up as null key
      v8::Local<v8::Object> key_buffer_object_emptystring = Nan::NewBuffer(new char[0], 0).ToLocalChecked();
      key_buffer_length = node::Buffer::Length(key_buffer_object_emptystring);
      key_buffer_data = node::Buffer::Data(key_buffer_object_emptystring);
    }
  } else {
    // If it was a string just use the utf8 value.
    v8::Local<v8::String> val = Nan::To<v8::String>(info[3]).ToLocalChecked();
    // Get string pointer for this thing
    Nan::Utf8String keyUTF8(val);
    key = new std::string(*keyUTF8);  // freed at the bottom of this method

    key_buffer_data = key->data();
    key_buffer_length = key->length();
  }

  int64_t timestamp;

  if (info.Length() > 4 && !info[4]->IsUndefined() && !info[4]->IsNull()) {
    if (!info[4]->IsNumber()) {
      return Nan::ThrowError("Timestamp must be a number");
    }

    timestamp = Nan::To<int64_t>(info[4]).FromJust();
  } else {
    // 0 means "no explicit timestamp".
    timestamp = 0;
  }

  void* opaque = NULL;
  // Opaque handling - kept alive across the async produce via a persistent
  // handle.
  if (info.Length() > 5 && !info[5]->IsUndefined()) {
    // We need to create a persistent handle
    opaque = new Nan::Persistent<v8::Value>(info[5]);
    // To get the local from this later,
    // v8::Local<v8::Object> object = Nan::New(persistent);
  }

  // Collect headers from an array of single-key objects: [{k: v}, ...].
  std::vector<RdKafka::Headers::Header> headers;
  if (info.Length() > 6 && !info[6]->IsUndefined()) {
    v8::Local<v8::Array> v8Headers = v8::Local<v8::Array>::Cast(info[6]);

    if (v8Headers->Length() >= 1) {
      for (unsigned int i = 0; i < v8Headers->Length(); i++) {
        v8::Local<v8::Object> header = Nan::Get(v8Headers, i).ToLocalChecked()
          ->ToObject(Nan::GetCurrentContext()).ToLocalChecked();
        if (header.IsEmpty()) {
          continue;
        }

        // Only the first own property of each object is used as the header
        // key; its value becomes the header value.
        v8::Local<v8::Array> props = header->GetOwnPropertyNames(
          Nan::GetCurrentContext()).ToLocalChecked();
        Nan::MaybeLocal<v8::String> v8Key = Nan::To<v8::String>(
          Nan::Get(props, 0).ToLocalChecked());
        Nan::MaybeLocal<v8::String> v8Value = Nan::To<v8::String>(
          Nan::Get(header, v8Key.ToLocalChecked()).ToLocalChecked());

        Nan::Utf8String uKey(v8Key.ToLocalChecked());
        std::string key(*uKey);

        Nan::Utf8String uValue(v8Value.ToLocalChecked());
        std::string value(*uValue);
        headers.push_back(
          RdKafka::Headers::Header(key, value.c_str(), value.size()));
      }
    }
  }

  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  // Let the JS library throw if we need to so the error can be more rich
  int error_code;

  if (info[0]->IsString()) {
    // Get string pointer for this thing
    Nan::Utf8String topicUTF8(Nan::To<v8::String>(info[0]).ToLocalChecked());
    std::string topic_name(*topicUTF8);
    RdKafka::Headers *rd_headers = RdKafka::Headers::create(headers);

    Baton b = producer->Produce(message_buffer_data, message_buffer_length,
     topic_name, partition, key_buffer_data, key_buffer_length,
     timestamp, opaque, rd_headers);

    error_code = static_cast<int>(b.err());
    // On failure librdkafka did not take ownership of the headers, so free
    // them here to avoid a leak.
    if (error_code != 0 && rd_headers) {
      delete rd_headers;
    }
  } else {
    // First parameter is a topic OBJECT
    Topic* topic = ObjectWrap::Unwrap<Topic>(info[0].As<v8::Object>());

    // Unwrap it and turn it into an RdKafka::Topic*
    Baton topic_baton = topic->toRDKafkaTopic(producer);

    if (topic_baton.err() != RdKafka::ERR_NO_ERROR) {
      // Let the JS library throw if we need to so the error can be more rich
      error_code = static_cast<int>(topic_baton.err());

      return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
    }

    RdKafka::Topic* rd_topic = topic_baton.data<RdKafka::Topic*>();

    // NOTE: the Topic-object path supports neither timestamps nor headers.
    Baton b = producer->Produce(message_buffer_data, message_buffer_length,
     rd_topic, partition, key_buffer_data, key_buffer_length, opaque);

    // Delete the topic when we are done.
    delete rd_topic;

    error_code = static_cast<int>(b.err());
  }

  // Free the heap key copy made for string keys.
  if (key != NULL) {
    delete key;
  }

  // Return the raw librdkafka error code; 0 means the message was enqueued.
  info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
}
|
||||||
|
|
||||||
|
/**
 * @brief Register a JS partitioner callback on this producer.
 *
 * Expects a single function argument; returns true on success.
 */
NAN_METHOD(Producer::NodeSetPartitioner) {
  Nan::HandleScope scope;

  if (info.Length() < 1 || !info[0]->IsFunction()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a callback");
  }

  Producer* self = ObjectWrap::Unwrap<Producer>(info.This());
  self->m_partitioner_cb.SetCallback(info[0].As<v8::Function>());

  info.GetReturnValue().Set(Nan::True());
}
|
||||||
|
|
||||||
|
/**
 * @brief Connect the producer asynchronously.
 *
 * Expects a single callback function; the connect work is queued on the
 * libuv thread pool and the callback fires when it completes.
 */
NAN_METHOD(Producer::NodeConnect) {
  Nan::HandleScope scope;

  if (info.Length() < 1 || !info[0]->IsFunction()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a callback");
  }

  // This needs to be offloaded to libuv
  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
  Nan::Callback *callback = new Nan::Callback(info[0].As<v8::Function>());
  Nan::AsyncQueueWorker(new Workers::ProducerConnect(callback, producer));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Serve queued librdkafka callbacks (e.g. delivery reports).
 *
 * Throws if the producer is disconnected; returns true otherwise.
 */
NAN_METHOD(Producer::NodePoll) {
  Nan::HandleScope scope;

  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  if (!producer->IsConnected()) {
    Nan::ThrowError("Producer is disconnected");
    return;
  }

  producer->Poll();
  info.GetReturnValue().Set(Nan::True());
}
|
||||||
|
|
||||||
|
/**
 * @brief Flush outstanding messages, waiting up to timeout_ms.
 *
 * @return Baton with the flush result, or ERR__STATE when disconnected.
 */
Baton Producer::Flush(int timeout_ms) {
  // Default to a state error; only a connected producer can flush.
  RdKafka::ErrorCode response_code = RdKafka::ERR__STATE;

  if (IsConnected()) {
    scoped_shared_read_lock lock(m_connection_lock);
    // Re-check under the lock: the connection may have been torn down
    // between the first check and lock acquisition.
    if (IsConnected()) {
      RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer*>(m_client);
      response_code = producer->flush(timeout_ms);
    }
  }

  return Baton(response_code);
}
|
||||||
|
|
||||||
|
/**
 * @brief Flush the producer asynchronously.
 *
 * Expects (timeout_ms: number, callback: function); the flush runs on the
 * libuv thread pool.
 */
NAN_METHOD(Producer::NodeFlush) {
  Nan::HandleScope scope;

  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a timeout and a callback");
  }

  int timeout_ms = Nan::To<int>(info[0]).FromJust();
  Nan::Callback *callback = new Nan::Callback(info[1].As<v8::Function>());
  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  Nan::AsyncQueueWorker(
    new Workers::ProducerFlush(callback, producer, timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Disconnect the producer asynchronously.
 *
 * Expects a single callback function; the disconnect runs on the libuv
 * thread pool.
 */
NAN_METHOD(Producer::NodeDisconnect) {
  Nan::HandleScope scope;

  if (info.Length() < 1 || !info[0]->IsFunction()) {
    // Just throw an exception
    return Nan::ThrowError("Need to specify a callback");
  }

  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
  Nan::Callback *callback = new Nan::Callback(info[0].As<v8::Function>());
  Nan::AsyncQueueWorker(new Workers::ProducerDisconnect(callback, producer));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Initialize transactions asynchronously.
 *
 * Expects (timeout_ms: number, callback: function).
 */
NAN_METHOD(Producer::NodeInitTransactions) {
  Nan::HandleScope scope;

  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
    return Nan::ThrowError("Need to specify a timeout and a callback");
  }

  int timeout_ms = Nan::To<int>(info[0]).FromJust();
  Nan::Callback *callback = new Nan::Callback(info[1].As<v8::Function>());
  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  Nan::AsyncQueueWorker(
    new Workers::ProducerInitTransactions(callback, producer, timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Begin a transaction asynchronously.
 *
 * Expects a single callback function.
 */
NAN_METHOD(Producer::NodeBeginTransaction) {
  Nan::HandleScope scope;

  if (info.Length() < 1 || !info[0]->IsFunction()) {
    return Nan::ThrowError("Need to specify a callback");
  }

  Nan::Callback *callback = new Nan::Callback(info[0].As<v8::Function>());
  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  Nan::AsyncQueueWorker(
    new Workers::ProducerBeginTransaction(callback, producer));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Commit the current transaction asynchronously.
 *
 * Expects (timeout_ms: number, callback: function).
 */
NAN_METHOD(Producer::NodeCommitTransaction) {
  Nan::HandleScope scope;

  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
    return Nan::ThrowError("Need to specify a timeout and a callback");
  }

  int timeout_ms = Nan::To<int>(info[0]).FromJust();
  Nan::Callback *callback = new Nan::Callback(info[1].As<v8::Function>());
  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  Nan::AsyncQueueWorker(
    new Workers::ProducerCommitTransaction(callback, producer, timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Abort the current transaction asynchronously.
 *
 * Expects (timeout_ms: number, callback: function).
 */
NAN_METHOD(Producer::NodeAbortTransaction) {
  Nan::HandleScope scope;

  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
    return Nan::ThrowError("Need to specify a timeout and a callback");
  }

  int timeout_ms = Nan::To<int>(info[0]).FromJust();
  Nan::Callback *callback = new Nan::Callback(info[1].As<v8::Function>());
  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());

  Nan::AsyncQueueWorker(
    new Workers::ProducerAbortTransaction(callback, producer, timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
/**
 * @brief Send a consumer's offsets to the current transaction, async.
 *
 * Expects (offsets: array, consumer: object, timeout_ms: number,
 * callback: function). The work is queued on the libuv thread pool.
 */
NAN_METHOD(Producer::NodeSendOffsetsToTransaction) {
  Nan::HandleScope scope;

  if (info.Length() < 4) {
    return Nan::ThrowError("Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback");
  }
  if (!info[0]->IsArray()) {
    // Fixed message: the first argument is the offsets array, not the
    // consumer (the old text described the wrong argument).
    return Nan::ThrowError("First argument to 'send offsets to transaction' has to be an array of topic partition offsets");
  }
  if (!info[1]->IsObject()) {
    // Must return: Nan::ThrowError only schedules the exception, and falling
    // through would unwrap an invalid consumer object below.
    return Nan::ThrowError("Kafka consumer must be provided");
  }
  if (!info[2]->IsNumber()) {
    // Same as above: return so we do not read a bogus timeout.
    return Nan::ThrowError("Timeout must be provided");
  }
  if (!info[3]->IsFunction()) {
    return Nan::ThrowError("Need to specify a callback");
  }

  std::vector<RdKafka::TopicPartition*> toppars =
    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
  NodeKafka::KafkaConsumer* consumer =
    ObjectWrap::Unwrap<KafkaConsumer>(info[1].As<v8::Object>());
  int timeout_ms = Nan::To<int>(info[2]).FromJust();
  v8::Local<v8::Function> cb = info[3].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);

  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
  Nan::AsyncQueueWorker(new Workers::ProducerSendOffsetsToTransaction(
    callback,
    producer,
    toppars,
    consumer,
    timeout_ms));

  info.GetReturnValue().Set(Nan::Null());
}
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
123
src/producer.h
Normal file
123
src/producer.h
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_PRODUCER_H_
|
||||||
|
#define SRC_PRODUCER_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <node.h>
|
||||||
|
#include <node_buffer.h>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/connection.h"
|
||||||
|
#include "src/callbacks.h"
|
||||||
|
#include "src/topic.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * @brief Data holder for a message extracted from a v8 object.
 *
 * Bundles the payload buffer, target topic, partition and key so the
 * message data can be handed to the producer as a unit.
 */
class ProducerMessage {
 public:
  explicit ProducerMessage(v8::Local<v8::Object>, NodeKafka::Topic*);
  ~ProducerMessage();

  // Raw payload pointer.
  void* Payload();
  // Payload size in bytes.
  size_t Size();
  // Whether the message carries no payload (see m_is_empty).
  bool IsEmpty();
  RdKafka::Topic * GetTopic();

  // Error description; presumably set when construction fails - confirm
  // against the implementation in producer.cc.
  std::string m_errstr;

  Topic * m_topic;
  int32_t m_partition;
  std::string m_key;

  // Payload buffer pointer and length.
  void* m_buffer_data;
  size_t m_buffer_length;

  bool m_is_empty;
};
|
||||||
|
|
||||||
|
/**
 * @brief v8-wrapped Kafka producer.
 *
 * Exposes connect/disconnect, produce, poll, flush and the transactional
 * API of the underlying RdKafka::Producer handle to node.
 */
class Producer : public Connection {
 public:
  static void Init(v8::Local<v8::Object>);
  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);

  Baton Connect();
  void Disconnect();
  // Serve queued librdkafka callbacks (delivery reports etc.).
  void Poll();
#if RD_KAFKA_VERSION > 0x00090200
  Baton Flush(int timeout_ms);
#endif

  // Produce to an already-materialized RdKafka::Topic (no timestamp or
  // header support on this overload).
  Baton Produce(void* message, size_t message_size,
    RdKafka::Topic* topic, int32_t partition,
    const void* key, size_t key_len,
    void* opaque);

  // Produce by topic name with a std::string key.
  Baton Produce(void* message, size_t message_size,
    std::string topic, int32_t partition,
    std::string* key,
    int64_t timestamp, void* opaque,
    RdKafka::Headers* headers);

  // Produce by topic name with a raw key buffer.
  Baton Produce(void* message, size_t message_size,
    std::string topic, int32_t partition,
    const void* key, size_t key_len,
    int64_t timestamp, void* opaque,
    RdKafka::Headers* headers);

  std::string Name();

  void ActivateDispatchers();
  void DeactivateDispatchers();

  void ConfigureCallback(const std::string &string_key, const v8::Local<v8::Function> &cb, bool add) override;

  // Transactional API - thin wrappers over the RdKafka::Producer calls;
  // each returns a Baton with the librdkafka error state.
  Baton InitTransactions(int32_t timeout_ms);
  Baton BeginTransaction();
  Baton CommitTransaction(int32_t timeout_ms);
  Baton AbortTransaction(int32_t timeout_ms);
  Baton SendOffsetsToTransaction(
    std::vector<RdKafka::TopicPartition*> &offsets,
    NodeKafka::KafkaConsumer* consumer,
    int timeout_ms
  );

 protected:
  static Nan::Persistent<v8::Function> constructor;
  static void New(const Nan::FunctionCallbackInfo<v8::Value>&);

  Producer(Conf*, Conf*);
  ~Producer();

 private:
  // JS-exposed method implementations (registered in Init).
  static NAN_METHOD(NodeProduce);
  static NAN_METHOD(NodeSetPartitioner);
  static NAN_METHOD(NodeConnect);
  static NAN_METHOD(NodeDisconnect);
  static NAN_METHOD(NodePoll);
#if RD_KAFKA_VERSION > 0x00090200
  static NAN_METHOD(NodeFlush);
#endif
  static NAN_METHOD(NodeInitTransactions);
  static NAN_METHOD(NodeBeginTransaction);
  static NAN_METHOD(NodeCommitTransaction);
  static NAN_METHOD(NodeAbortTransaction);
  static NAN_METHOD(NodeSendOffsetsToTransaction);

  // Delivery-report and partitioner callback dispatchers.
  Callbacks::Delivery m_dr_cb;
  Callbacks::Partitioner m_partitioner_cb;
};
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_PRODUCER_H_
|
173
src/topic.cc
Normal file
173
src/topic.cc
Normal file
@ -0,0 +1,173 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/connection.h"
|
||||||
|
#include "src/topic.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * @brief Topic v8 wrapped object.
 *
 * Wraps a topic name together with optional topic configuration and
 * provides methods for interacting with it exposed to node.
 *
 * @sa RdKafka::Topic
 */
|
||||||
|
|
||||||
|
/**
 * @brief Construct a Topic wrapper from a name and optional configuration.
 *
 * Takes ownership of `config` (freed in the destructor).
 * NOTE(review): we probably want to copy the config instead; may require
 * refactoring if we do not.
 */
Topic::Topic(std::string topic_name, RdKafka::Conf* config):
  m_topic_name(topic_name),
  m_config(config) {
}
|
||||||
|
|
||||||
|
/**
 * @brief Release the owned topic configuration, if any.
 */
Topic::~Topic() {
  // `delete` on NULL is a no-op, but keep the explicit guard for clarity.
  if (m_config != NULL) {
    delete m_config;
  }
}
|
||||||
|
|
||||||
|
// Return the topic name this wrapper was constructed with.
std::string Topic::name() {
  return m_topic_name;
}
|
||||||
|
|
||||||
|
/**
 * @brief Materialize the underlying RdKafka topic via the given connection.
 *
 * Forwards the stored topic configuration only when one was supplied at
 * construction.
 *
 * @param handle - connection used to create the librdkafka topic handle.
 * @return Baton wrapping the created topic or the creation error.
 */
Baton Topic::toRDKafkaTopic(Connection* handle) {
  return m_config
    ? handle->CreateTopic(m_topic_name, m_config)
    : handle->CreateTopic(m_topic_name);
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
bool partition_available(int32_t partition) {
|
||||||
|
return topic_->partition_available(partition);
|
||||||
|
}
|
||||||
|
|
||||||
|
Baton offset_store (int32_t partition, int64_t offset) {
|
||||||
|
RdKafka::ErrorCode err = topic_->offset_store(partition, offset);
|
||||||
|
|
||||||
|
switch (err) {
|
||||||
|
case RdKafka::ERR_NO_ERROR:
|
||||||
|
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
Nan::Persistent<v8::Function> Topic::constructor;
|
||||||
|
|
||||||
|
// Build the v8 constructor template for the Topic wrapper, register its
// prototype methods, and export it as "Topic".
void Topic::Init(v8::Local<v8::Object> exports) {
  Nan::HandleScope scope;

  v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);
  tpl->SetClassName(Nan::New("Topic").ToLocalChecked());
  // One internal field to hold the wrapped native pointer.
  tpl->InstanceTemplate()->SetInternalFieldCount(1);

  Nan::SetPrototypeMethod(tpl, "name", NodeGetName);

  // connect. disconnect. resume. pause. get meta data
  constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext()))
    .ToLocalChecked());

  Nan::Set(exports, Nan::New("Topic").ToLocalChecked(),
    tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked());
}
|
||||||
|
|
||||||
|
// JS constructor: new Topic(name [, topicConfig]).
// Validates the arguments, builds an optional topic configuration, and
// wraps a new Topic instance around `this`.
void Topic::New(const Nan::FunctionCallbackInfo<v8::Value>& info) {
  if (!info.IsConstructCall()) {
    return Nan::ThrowError("non-constructor invocation not supported");
  }

  if (info.Length() < 1) {
    return Nan::ThrowError("topic name is required");
  }

  if (!info[0]->IsString()) {
    return Nan::ThrowError("Topic name must be a string");
  }

  RdKafka::Conf* config = NULL;

  if (info.Length() >= 2 && !info[1]->IsUndefined() && !info[1]->IsNull()) {
    // A second argument was supplied and is neither null nor undefined, so
    // it must be a topic-configuration object; otherwise config stays NULL
    // and the topic uses default configuration.

    std::string errstr;
    if (!info[1]->IsObject()) {
      return Nan::ThrowError("Configuration data must be specified");
    }

    config = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); // NOLINT

    if (!config) {
      // Conf::create filled errstr with the failure reason.
      return Nan::ThrowError(errstr.c_str());
    }
  }

  Nan::Utf8String parameterValue(Nan::To<v8::String>(info[0]).ToLocalChecked());
  std::string topic_name(*parameterValue);

  // The Topic takes ownership of `config` (deleted in ~Topic).
  Topic* topic = new Topic(topic_name, config);

  // Wrap it
  topic->Wrap(info.This());

  // Then there is some weird initialization that happens
  // basically it sets the configuration data
  // we don't need to do that because we lazy load it

  info.GetReturnValue().Set(info.This());
}
|
||||||
|
|
||||||
|
// handle
|
||||||
|
|
||||||
|
/**
 * @brief Instantiate a Topic from C++ by invoking the stored constructor.
 *
 * @param arg - single argument forwarded to the JS constructor.
 * @return the newly constructed wrapped object.
 */
v8::Local<v8::Object> Topic::NewInstance(v8::Local<v8::Value> arg) {
  Nan::EscapableHandleScope scope;

  const unsigned argc = 1;
  v8::Local<v8::Value> argv[argc] = { arg };

  v8::Local<v8::Function> ctor = Nan::New<v8::Function>(constructor);
  v8::Local<v8::Object> obj =
    Nan::NewInstance(ctor, argc, argv).ToLocalChecked();

  return scope.Escape(obj);
}
|
||||||
|
|
||||||
|
/**
 * @brief JS accessor: return the topic's name as a v8 string.
 */
NAN_METHOD(Topic::NodeGetName) {
  Nan::HandleScope scope;

  Topic* self = ObjectWrap::Unwrap<Topic>(info.This());
  info.GetReturnValue().Set(Nan::New(self->name()).ToLocalChecked());
}
|
||||||
|
|
||||||
|
// Stub: not implemented yet (would query partition availability on the
// underlying RdKafka::Topic).
NAN_METHOD(Topic::NodePartitionAvailable) {
  // @TODO(sparente)
}
|
||||||
|
|
||||||
|
// Stub: not implemented yet (would store an offset for a partition on the
// underlying RdKafka::Topic).
NAN_METHOD(Topic::NodeOffsetStore) {
  // @TODO(sparente)
}
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
54
src/topic.h
Normal file
54
src/topic.h
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_TOPIC_H_
|
||||||
|
#define SRC_TOPIC_H_
|
||||||
|
|
||||||
|
#include <nan.h>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "rdkafkacpp.h"
|
||||||
|
|
||||||
|
#include "src/config.h"
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
|
||||||
|
/**
 * @brief v8-wrapped Kafka topic: a name plus optional topic configuration.
 */
class Topic : public Nan::ObjectWrap {
 public:
  static void Init(v8::Local<v8::Object>);
  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value> arg);

  // Materialize the underlying RdKafka::Topic via the given connection,
  // forwarding the stored configuration when one was supplied.
  Baton toRDKafkaTopic(Connection *handle);

 protected:
  static Nan::Persistent<v8::Function> constructor;
  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);

  static NAN_METHOD(NodeGetMetadata);

  // TopicConfig * config_;

  std::string errstr;
  std::string name();

 private:
  Topic(std::string, RdKafka::Conf *);
  ~Topic();

  // Topic name and optional topic-level configuration (owned; deleted in
  // the destructor).
  std::string m_topic_name;
  RdKafka::Conf * m_config;

  static NAN_METHOD(NodeGetName);
  static NAN_METHOD(NodePartitionAvailable);
  static NAN_METHOD(NodeOffsetStore);
};
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_TOPIC_H_
|
1238
src/workers.cc
Normal file
1238
src/workers.cc
Normal file
File diff suppressed because it is too large
Load Diff
505
src/workers.h
Normal file
505
src/workers.h
Normal file
@ -0,0 +1,505 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SRC_WORKERS_H_
|
||||||
|
#define SRC_WORKERS_H_
|
||||||
|
|
||||||
|
#include <uv.h>
|
||||||
|
#include <nan.h>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "src/common.h"
|
||||||
|
#include "src/producer.h"
|
||||||
|
#include "src/kafka-consumer.h"
|
||||||
|
#include "src/admin.h"
|
||||||
|
#include "rdkafka.h" // NOLINT
|
||||||
|
|
||||||
|
namespace NodeKafka {
|
||||||
|
namespace Workers {
|
||||||
|
|
||||||
|
/**
 * @brief Base async worker that carries a Baton for error propagation.
 *
 * Subclasses record an error Baton during Execute(); HandleErrorCallback()
 * then surfaces it to the JS callback as a single Error argument.
 */
class ErrorAwareWorker : public Nan::AsyncWorker {
 public:
  explicit ErrorAwareWorker(Nan::Callback* callback_) :
    Nan::AsyncWorker(callback_),
    m_baton(RdKafka::ERR_NO_ERROR) {}
  virtual ~ErrorAwareWorker() {}

  virtual void Execute() = 0;
  virtual void HandleOKCallback() = 0;
  // Runs on the main thread when Execute() reported an error; invokes the
  // JS callback with an Error built from AsyncWorker's ErrorMessage().
  void HandleErrorCallback() {
    Nan::HandleScope scope;

    const unsigned int argc = 1;
    v8::Local<v8::Value> argv[argc] = { Nan::Error(ErrorMessage()) };

    callback->Call(argc, argv);
  }

 protected:
  // Record an error from a raw int (cast to RdKafka::ErrorCode).
  void SetErrorCode(const int & code) {
    RdKafka::ErrorCode rd_err = static_cast<RdKafka::ErrorCode>(code);
    SetErrorCode(rd_err);
  }
  void SetErrorCode(const RdKafka::ErrorCode & err) {
    SetErrorBaton(Baton(err));
  }
  // Store the full Baton and mirror its message into AsyncWorker's error
  // slot so HandleErrorCallback() is triggered.
  void SetErrorBaton(const NodeKafka::Baton & baton) {
    m_baton = baton;
    SetErrorMessage(m_baton.errstr().c_str());
  }

  int GetErrorCode() {
    return m_baton.err();
  }

  v8::Local<v8::Object> GetErrorObject() {
    return m_baton.ToObject();
  }

  // Last recorded error state (ERR_NO_ERROR when none).
  Baton m_baton;
};
|
||||||
|
|
||||||
|
/**
 * @brief AsyncWorker variant that can deliver results to the main thread
 *        while Execute() is still running on the worker thread.
 *
 * Worker-thread code enqueues messages/warnings through an
 * ExecutionMessageBus; a libuv async handle wakes the main loop, which
 * drains the queues and invokes HandleMessageCallback for each item.
 */
class MessageWorker : public ErrorAwareWorker {
 public:
  explicit MessageWorker(Nan::Callback* callback_)
      : ErrorAwareWorker(callback_), m_asyncdata() {
    m_async = new uv_async_t;
    uv_async_init(
      uv_default_loop(),
      m_async,
      m_async_message);
    // Back-pointer so the static libuv callback can reach this worker.
    m_async->data = this;

    uv_mutex_init(&m_async_lock);
  }

  virtual ~MessageWorker() {
    uv_mutex_destroy(&m_async_lock);
    // NOTE: m_async is NOT freed here; it is released in AsyncClose_
    // after uv_close completes (see Destroy()).
  }

  // Main-thread drain: swap the queued messages/warnings out under the
  // lock, then dispatch each one to HandleMessageCallback.
  void WorkMessage() {
    if (!callback) {
      return;
    }

    std::vector<RdKafka::Message*> message_queue;
    std::vector<RdKafka::ErrorCode> warning_queue;

    {
      scoped_mutex_lock lock(m_async_lock);
      // Copy the vector and empty it
      m_asyncdata.swap(message_queue);
      m_asyncwarning.swap(warning_queue);
    }

    for (unsigned int i = 0; i < message_queue.size(); i++) {
      HandleMessageCallback(message_queue[i], RdKafka::ERR_NO_ERROR);

      // we are done with it. it is about to go out of scope
      // for the last time so let's just free it up here. can't rely
      // on the destructor
    }

    // Warnings carry no message payload; deliver the code with NULL.
    for (unsigned int i = 0; i < warning_queue.size(); i++) {
      HandleMessageCallback(NULL, warning_queue[i]);
    }
  }

  // Narrow interface handed to Execute() so worker-thread code can only
  // enqueue results, not touch the rest of the worker.
  class ExecutionMessageBus {
    friend class MessageWorker;
   public:
    void Send(RdKafka::Message* m) const {
      that_->Produce_(m);
    }
    void SendWarning(RdKafka::ErrorCode c) const {
      that_->ProduceWarning_(c);
    }
   private:
    explicit ExecutionMessageBus(MessageWorker* that) : that_(that) {}
    MessageWorker* const that_;
  };

  virtual void Execute(const ExecutionMessageBus&) = 0;
  virtual void HandleMessageCallback(RdKafka::Message*, RdKafka::ErrorCode) = 0;

  // Initiates teardown: closing the handle triggers AsyncClose_, which
  // frees both the handle and this worker.
  virtual void Destroy() {
    uv_close(reinterpret_cast<uv_handle_t*>(m_async), AsyncClose_);
  }

 private:
  // Hides ErrorAwareWorker::Execute(); wraps this worker in a bus and
  // delegates to the message-aware overload.
  void Execute() {
    ExecutionMessageBus message_bus(this);
    Execute(message_bus);
  }

  // Worker-thread side: enqueue a message and wake the main loop.
  void Produce_(RdKafka::Message* m) {
    scoped_mutex_lock lock(m_async_lock);
    m_asyncdata.push_back(m);
    uv_async_send(m_async);
  }

  // Worker-thread side: enqueue a non-fatal error code and wake the loop.
  void ProduceWarning_(RdKafka::ErrorCode c) {
    scoped_mutex_lock lock(m_async_lock);
    m_asyncwarning.push_back(c);
    uv_async_send(m_async);
  }

  // libuv async callback (main thread), fired after uv_async_send.
  NAN_INLINE static NAUV_WORK_CB(m_async_message) {
    MessageWorker *worker = static_cast<MessageWorker*>(async->data);
    worker->WorkMessage();
  }

  // uv_close completion: free the handle, then the worker itself.
  NAN_INLINE static void AsyncClose_(uv_handle_t* handle) {
    MessageWorker *worker = static_cast<MessageWorker*>(handle->data);
    delete reinterpret_cast<uv_async_t*>(handle);
    delete worker;
  }

  uv_async_t *m_async;                           // wake-up handle for the main loop
  uv_mutex_t m_async_lock;                       // guards the two queues below
  std::vector<RdKafka::Message*> m_asyncdata;    // pending messages
  std::vector<RdKafka::ErrorCode> m_asyncwarning;  // pending warning codes
};
|
||||||
|
|
||||||
|
namespace Handle {
/**
 * @brief Async worker resolving partition offsets for given timestamps.
 *
 * The partitions vector carries the query in and (presumably, per the
 * librdkafka offsetsForTimes contract — confirm in src/workers.cc) the
 * resolved offsets out. Implementation lives in src/workers.cc.
 */
class OffsetsForTimes : public ErrorAwareWorker {
 public:
  OffsetsForTimes(Nan::Callback*, NodeKafka::Connection*,
    std::vector<RdKafka::TopicPartition*> &,
    const int &);
  ~OffsetsForTimes();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Connection * m_handle;  // connection the lookup runs against
  std::vector<RdKafka::TopicPartition*> m_topic_partitions;
  const int m_timeout_ms;  // broker request timeout
};
}  // namespace Handle
|
||||||
|
|
||||||
|
/**
 * @brief Async worker fetching cluster metadata over an existing
 *        connection. Implementation in src/workers.cc.
 */
class ConnectionMetadata : public ErrorAwareWorker {
 public:
  ConnectionMetadata(Nan::Callback*, NodeKafka::Connection*,
    std::string, int, bool);
  ~ConnectionMetadata();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Connection * m_connection;
  std::string m_topic;      // topic to scope the request to
  int m_timeout_ms;
  bool m_all_topics;        // request metadata for every topic

  RdKafka::Metadata* m_metadata;  // result, filled by Execute()
};

/**
 * @brief Async worker querying low/high watermark offsets for a single
 *        topic partition. Implementation in src/workers.cc.
 */
class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker {
 public:
  ConnectionQueryWatermarkOffsets(Nan::Callback*, NodeKafka::Connection*,
    std::string, int32_t, int);
  ~ConnectionQueryWatermarkOffsets();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Connection * m_connection;
  std::string m_topic;
  int32_t m_partition;
  int m_timeout_ms;

  // Results, filled by Execute().
  int64_t m_high_offset;
  int64_t m_low_offset;
};
|
||||||
|
|
||||||
|
/**
 * @brief Async worker connecting a Producer. Implementation in
 *        src/workers.cc.
 */
class ProducerConnect : public ErrorAwareWorker {
 public:
  ProducerConnect(Nan::Callback*, NodeKafka::Producer*);
  ~ProducerConnect();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
};

/**
 * @brief Async worker disconnecting a Producer.
 */
class ProducerDisconnect : public ErrorAwareWorker {
 public:
  ProducerDisconnect(Nan::Callback*, NodeKafka::Producer*);
  ~ProducerDisconnect();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
};

/**
 * @brief Async worker flushing outstanding produce requests.
 *
 * NOTE(review): unlike its siblings this class declares no
 * HandleErrorCallback override — presumably intentional; confirm in
 * src/workers.cc.
 */
class ProducerFlush : public ErrorAwareWorker {
 public:
  ProducerFlush(Nan::Callback*, NodeKafka::Producer*, int);
  ~ProducerFlush();

  void Execute();
  void HandleOKCallback();

 private:
  NodeKafka::Producer * producer;
  int timeout_ms;
};

/**
 * @brief Async worker initializing transactions on a Producer.
 */
class ProducerInitTransactions : public ErrorAwareWorker {
 public:
  ProducerInitTransactions(Nan::Callback*, NodeKafka::Producer*, const int &);
  ~ProducerInitTransactions();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
  const int m_timeout_ms;
};

/**
 * @brief Async worker beginning a transaction on a Producer.
 */
class ProducerBeginTransaction : public ErrorAwareWorker {
 public:
  ProducerBeginTransaction(Nan::Callback*, NodeKafka::Producer*);
  ~ProducerBeginTransaction();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
};

/**
 * @brief Async worker committing the current transaction.
 */
class ProducerCommitTransaction : public ErrorAwareWorker {
 public:
  ProducerCommitTransaction(Nan::Callback*, NodeKafka::Producer*, const int &);
  ~ProducerCommitTransaction();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
  const int m_timeout_ms;
};

/**
 * @brief Async worker aborting the current transaction.
 */
class ProducerAbortTransaction : public ErrorAwareWorker {
 public:
  ProducerAbortTransaction(Nan::Callback*, NodeKafka::Producer*, const int &);
  ~ProducerAbortTransaction();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
  const int m_timeout_ms;
};

/**
 * @brief Async worker sending consumer offsets as part of a Producer
 *        transaction (exactly-once semantics pairing).
 */
class ProducerSendOffsetsToTransaction : public ErrorAwareWorker {
 public:
  ProducerSendOffsetsToTransaction(
    Nan::Callback*, NodeKafka::Producer*,
    std::vector<RdKafka::TopicPartition*> &,
    KafkaConsumer*,
    const int &);
  ~ProducerSendOffsetsToTransaction();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::Producer * producer;
  std::vector<RdKafka::TopicPartition*> m_topic_partitions;  // offsets to send
  NodeKafka::KafkaConsumer* consumer;  // consumer whose group metadata is used
  const int m_timeout_ms;
};
|
||||||
|
|
||||||
|
/**
 * @brief Async worker connecting a KafkaConsumer. Implementation in
 *        src/workers.cc.
 */
class KafkaConsumerConnect : public ErrorAwareWorker {
 public:
  KafkaConsumerConnect(Nan::Callback*, NodeKafka::KafkaConsumer*);
  ~KafkaConsumerConnect();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::KafkaConsumer * consumer;
};

/**
 * @brief Async worker disconnecting a KafkaConsumer.
 */
class KafkaConsumerDisconnect : public ErrorAwareWorker {
 public:
  KafkaConsumerDisconnect(Nan::Callback*, NodeKafka::KafkaConsumer*);
  ~KafkaConsumerDisconnect();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();

 private:
  NodeKafka::KafkaConsumer * consumer;
};

/**
 * @brief Long-running consume loop built on MessageWorker: messages are
 *        streamed to JS via HandleMessageCallback as they arrive.
 */
class KafkaConsumerConsumeLoop : public MessageWorker {
 public:
  KafkaConsumerConsumeLoop(Nan::Callback*,
    NodeKafka::KafkaConsumer*, const int &, const int &);
  ~KafkaConsumerConsumeLoop();

  void Execute(const ExecutionMessageBus&);
  void HandleOKCallback();
  void HandleErrorCallback();
  void HandleMessageCallback(RdKafka::Message*, RdKafka::ErrorCode);
 private:
  NodeKafka::KafkaConsumer * consumer;
  const int m_timeout_ms;        // per-poll timeout
  unsigned int m_rand_seed;      // seed state — usage in src/workers.cc
  const int m_timeout_sleep_delay_ms;  // back-off between empty polls
};

/**
 * @brief One-shot consume of a single message.
 */
class KafkaConsumerConsume : public ErrorAwareWorker {
 public:
  KafkaConsumerConsume(Nan::Callback*, NodeKafka::KafkaConsumer*, const int &);
  ~KafkaConsumerConsume();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::KafkaConsumer * consumer;
  const int m_timeout_ms;
  RdKafka::Message* m_message;  // result, filled by Execute()
};

/**
 * @brief Fetch committed offsets for the given partitions.
 */
class KafkaConsumerCommitted : public ErrorAwareWorker {
 public:
  KafkaConsumerCommitted(Nan::Callback*,
    NodeKafka::KafkaConsumer*, std::vector<RdKafka::TopicPartition*> &,
    const int &);
  ~KafkaConsumerCommitted();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::KafkaConsumer * m_consumer;
  std::vector<RdKafka::TopicPartition*> m_topic_partitions;
  const int m_timeout_ms;
};

/**
 * @brief Seek a single topic partition to a given offset.
 */
class KafkaConsumerSeek : public ErrorAwareWorker {
 public:
  KafkaConsumerSeek(Nan::Callback*, NodeKafka::KafkaConsumer*,
    const RdKafka::TopicPartition *, const int &);
  ~KafkaConsumerSeek();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::KafkaConsumer * m_consumer;
  const RdKafka::TopicPartition * m_toppar;  // partition + target offset
  const int m_timeout_ms;
};

/**
 * @brief Consume up to a fixed number of messages in one batch.
 */
class KafkaConsumerConsumeNum : public ErrorAwareWorker {
 public:
  KafkaConsumerConsumeNum(Nan::Callback*, NodeKafka::KafkaConsumer*,
    const uint32_t &, const int &);
  ~KafkaConsumerConsumeNum();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::KafkaConsumer * m_consumer;
  const uint32_t m_num_messages;  // max batch size
  const int m_timeout_ms;
  std::vector<RdKafka::Message*> m_messages;  // result batch
};
|
||||||
|
|
||||||
|
/**
 * @brief Create a kafka topic on a remote broker cluster
 */
class AdminClientCreateTopic : public ErrorAwareWorker {
 public:
  AdminClientCreateTopic(Nan::Callback*, NodeKafka::AdminClient*,
    rd_kafka_NewTopic_t*, const int &);
  ~AdminClientCreateTopic();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::AdminClient * m_client;
  rd_kafka_NewTopic_t* m_topic;  // librdkafka C-API topic descriptor
  const int m_timeout_ms;
};

/**
 * @brief Delete a kafka topic on a remote broker cluster
 */
class AdminClientDeleteTopic : public ErrorAwareWorker {
 public:
  AdminClientDeleteTopic(Nan::Callback*, NodeKafka::AdminClient*,
    rd_kafka_DeleteTopic_t*, const int &);
  ~AdminClientDeleteTopic();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::AdminClient * m_client;
  rd_kafka_DeleteTopic_t* m_topic;
  const int m_timeout_ms;
};

/**
 * @brief Create partitions for a kafka topic on a remote broker cluster
 *        (original banner said "Delete a kafka topic" — copy-paste error)
 */
class AdminClientCreatePartitions : public ErrorAwareWorker {
 public:
  AdminClientCreatePartitions(Nan::Callback*, NodeKafka::AdminClient*,
    rd_kafka_NewPartitions_t*, const int &);
  ~AdminClientCreatePartitions();

  void Execute();
  void HandleOKCallback();
  void HandleErrorCallback();
 private:
  NodeKafka::AdminClient * m_client;
  rd_kafka_NewPartitions_t* m_partitions;
  const int m_timeout_ms;
};
|
||||||
|
|
||||||
|
} // namespace Workers
|
||||||
|
|
||||||
|
} // namespace NodeKafka
|
||||||
|
|
||||||
|
#endif // SRC_WORKERS_H_
|
66
test/binding.spec.js
Normal file
66
test/binding.spec.js
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var addon = require('bindings')('node-librdkafka');
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
var consumerConfig = {
|
||||||
|
'group.id': 'awesome'
|
||||||
|
};
|
||||||
|
|
||||||
|
var producerConfig = {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
};
|
||||||
|
|
||||||
|
var client;
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'native addon': {
|
||||||
|
'exports something': function() {
|
||||||
|
t.equal(typeof(addon), 'object');
|
||||||
|
},
|
||||||
|
'exports valid producer': function() {
|
||||||
|
t.equal(typeof(addon.Producer), 'function');
|
||||||
|
t.throws(addon.Producer); // Requires constructor
|
||||||
|
t.equal(typeof(new addon.Producer({}, {})), 'object');
|
||||||
|
},
|
||||||
|
'exports valid consumer': function() {
|
||||||
|
t.equal(typeof(addon.KafkaConsumer), 'function');
|
||||||
|
t.throws(addon.KafkaConsumer); // Requires constructor
|
||||||
|
t.equal(typeof(new addon.KafkaConsumer(consumerConfig, {})), 'object');
|
||||||
|
},
|
||||||
|
'exports version': function() {
|
||||||
|
t.ok(addon.librdkafkaVersion);
|
||||||
|
},
|
||||||
|
'Producer client': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
client = new addon.Producer(producerConfig, {});
|
||||||
|
},
|
||||||
|
'afterEach': function() {
|
||||||
|
client = null;
|
||||||
|
},
|
||||||
|
'is an object': function() {
|
||||||
|
t.equal(typeof(client), 'object');
|
||||||
|
},
|
||||||
|
'requires configuration': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
return new addon.Producer();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'has necessary methods from superclass': function() {
|
||||||
|
var methods = ['connect', 'disconnect', 'configureCallbacks', 'getMetadata'];
|
||||||
|
methods.forEach(function(m) {
|
||||||
|
t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
86
test/consumer.spec.js
Normal file
86
test/consumer.spec.js
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var addon = require('bindings')('node-librdkafka');
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
var client;
|
||||||
|
var defaultConfig = {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'group.id': 'kafka-mocha-grp',
|
||||||
|
'metadata.broker.list': 'localhost:9092'
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'Consumer': {
|
||||||
|
'afterEach': function() {
|
||||||
|
client = null;
|
||||||
|
},
|
||||||
|
'cannot be set without a topic config': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
client = new addon.KafkaConsumer(defaultConfig);
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'can be given a topic config': function() {
|
||||||
|
client = new addon.KafkaConsumer(defaultConfig, {});
|
||||||
|
},
|
||||||
|
'throws us an error if we provide an invalid configuration value': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
client = new addon.KafkaConsumer({
|
||||||
|
'foo': 'bar'
|
||||||
|
});
|
||||||
|
}, 'should throw because the key is invalid1');
|
||||||
|
},
|
||||||
|
'throws us an error if topic config is given something invalid': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
client = new addon.KafkaConsumer(defaultConfig, { 'foo': 'bar' });
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'ignores function arguments for global configuration': function() {
|
||||||
|
client = new addon.KafkaConsumer({
|
||||||
|
'event_cb': function() {},
|
||||||
|
'group.id': 'mocha-test'
|
||||||
|
}, {});
|
||||||
|
t.ok(client);
|
||||||
|
},
|
||||||
|
'ignores function arguments for topic configuration': function() {
|
||||||
|
client = new addon.KafkaConsumer(defaultConfig, {
|
||||||
|
'partitioner_cb': function() {}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'KafkaConsumer client': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
client = new addon.KafkaConsumer(defaultConfig, {});
|
||||||
|
},
|
||||||
|
'afterEach': function() {
|
||||||
|
client = null;
|
||||||
|
},
|
||||||
|
'is an object': function() {
|
||||||
|
t.equal(typeof(client), 'object');
|
||||||
|
},
|
||||||
|
'requires configuration': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
return new addon.KafkaConsumer();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'has necessary methods from superclass': function() {
|
||||||
|
var methods = ['connect', 'disconnect', 'configureCallbacks', 'getMetadata'];
|
||||||
|
methods.forEach(function(m) {
|
||||||
|
t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'has necessary bindings for librdkafka 1:1 binding': function() {
|
||||||
|
var methods = ['assign', 'unassign', 'subscribe'];
|
||||||
|
methods.forEach(function(m) {
|
||||||
|
t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
|
||||||
|
});
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
8
test/error.spec.js
Normal file
8
test/error.spec.js
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
8
test/index.spec.js
Normal file
8
test/index.spec.js
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
298
test/kafka-consumer-stream.spec.js
Normal file
298
test/kafka-consumer-stream.spec.js
Normal file
@ -0,0 +1,298 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var KafkaConsumerStream = require('../lib/kafka-consumer-stream');
|
||||||
|
var t = require('assert');
|
||||||
|
var Writable = require('stream').Writable;
|
||||||
|
var Emitter = require('events');
|
||||||
|
|
||||||
|
var fakeClient;
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'KafkaConsumerStream stream': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
fakeClient = new Emitter();
|
||||||
|
fakeClient._isConnecting = false;
|
||||||
|
fakeClient._isConnected = true;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.unsubscribe = function() {
|
||||||
|
this.emit('unsubscribed');
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.disconnect = function(cb) {
|
||||||
|
this.emit('disconnected');
|
||||||
|
if (cb) {
|
||||||
|
t.equal(typeof cb, 'function');
|
||||||
|
setImmediate(cb);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
fakeClient.consume = function(size, cb) {
|
||||||
|
if (!size) {
|
||||||
|
cb = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
t.equal(typeof cb, 'function',
|
||||||
|
'Provided callback should always be a function');
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, [{
|
||||||
|
value: Buffer.from('test'),
|
||||||
|
key: 'testkey',
|
||||||
|
offset: 1
|
||||||
|
}]);
|
||||||
|
});
|
||||||
|
};
|
||||||
|
fakeClient.subscribe = function(topics) {
|
||||||
|
t.equal(Array.isArray(topics), true);
|
||||||
|
return this;
|
||||||
|
};
|
||||||
|
},
|
||||||
|
|
||||||
|
'exports a stream class': function() {
|
||||||
|
t.equal(typeof(KafkaConsumerStream), 'function');
|
||||||
|
},
|
||||||
|
|
||||||
|
'can be instantiated': function() {
|
||||||
|
t.equal(typeof new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic'
|
||||||
|
}), 'object');
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly reads off the fake client': function(cb) {
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
stream.once('readable', function() {
|
||||||
|
var message = stream.read();
|
||||||
|
t.notEqual(message, null);
|
||||||
|
t.ok(Buffer.isBuffer(message.value));
|
||||||
|
t.equal('test', message.value.toString());
|
||||||
|
t.equal('testkey', message.key);
|
||||||
|
t.equal(typeof message.offset, 'number');
|
||||||
|
stream.pause();
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly reads off the fake with a topic function': function(cb) {
|
||||||
|
fakeClient._metadata = {
|
||||||
|
orig_broker_id: 1,
|
||||||
|
orig_broker_name: "broker_name",
|
||||||
|
brokers: [
|
||||||
|
{
|
||||||
|
id: 1,
|
||||||
|
host: 'localhost',
|
||||||
|
port: 40
|
||||||
|
}
|
||||||
|
],
|
||||||
|
topics: [
|
||||||
|
{
|
||||||
|
name: 'awesome-topic',
|
||||||
|
partitions: [
|
||||||
|
{
|
||||||
|
id: 1,
|
||||||
|
leader: 20,
|
||||||
|
replicas: [1, 2],
|
||||||
|
isrs: [1, 2]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: function(metadata) {
|
||||||
|
var topics = metadata.topics.map(function(v) {
|
||||||
|
return v.name;
|
||||||
|
});
|
||||||
|
|
||||||
|
return topics;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
fakeClient.subscribe = function(topics) {
|
||||||
|
t.equal(Array.isArray(topics), true);
|
||||||
|
t.equal(topics[0], 'awesome-topic');
|
||||||
|
t.equal(topics.length, 1);
|
||||||
|
return this;
|
||||||
|
};
|
||||||
|
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
stream.once('readable', function() {
|
||||||
|
var message = stream.read();
|
||||||
|
t.notEqual(message, null);
|
||||||
|
t.ok(Buffer.isBuffer(message.value));
|
||||||
|
t.equal('test', message.value.toString());
|
||||||
|
t.equal('testkey', message.key);
|
||||||
|
t.equal(typeof message.offset, 'number');
|
||||||
|
stream.pause();
|
||||||
|
cb();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly reads correct number of messages but does not stop': function(next) {
|
||||||
|
var numMessages = 10;
|
||||||
|
var numReceived = 0;
|
||||||
|
var numSent = 0;
|
||||||
|
|
||||||
|
fakeClient.consume = function(size, cb) {
|
||||||
|
if (numSent < numMessages) {
|
||||||
|
numSent++;
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, [{
|
||||||
|
value: Buffer.from('test'),
|
||||||
|
offset: 1
|
||||||
|
}]);
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
}
|
||||||
|
};
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
// Ignore
|
||||||
|
});
|
||||||
|
stream.on('readable', function() {
|
||||||
|
var message = stream.read();
|
||||||
|
numReceived++;
|
||||||
|
t.notEqual(message, null);
|
||||||
|
t.ok(Buffer.isBuffer(message.value));
|
||||||
|
t.equal(typeof message.offset, 'number');
|
||||||
|
if (numReceived === numMessages) {
|
||||||
|
// give it a second to get an error
|
||||||
|
next();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can be piped around': function(cb) {
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic'
|
||||||
|
});
|
||||||
|
var writable = new Writable({
|
||||||
|
write: function(message, encoding, next) {
|
||||||
|
t.notEqual(message, null);
|
||||||
|
t.ok(Buffer.isBuffer(message.value));
|
||||||
|
t.equal(typeof message.offset, 'number');
|
||||||
|
this.cork();
|
||||||
|
cb();
|
||||||
|
},
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.pipe(writable);
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
},
|
||||||
|
|
||||||
|
'streams as batch when specified': function(next) {
|
||||||
|
var numMessages = 10;
|
||||||
|
var numReceived = 0;
|
||||||
|
var numSent = 0;
|
||||||
|
|
||||||
|
fakeClient.consume = function(size, cb) {
|
||||||
|
if (numSent < numMessages) {
|
||||||
|
numSent++;
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, [{
|
||||||
|
value: Buffer.from('test'),
|
||||||
|
offset: 1
|
||||||
|
}]);
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
}
|
||||||
|
};
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic',
|
||||||
|
streamAsBatch: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
// Ignore
|
||||||
|
});
|
||||||
|
stream.on('readable', function() {
|
||||||
|
var messages = stream.read();
|
||||||
|
numReceived++;
|
||||||
|
t.equal(Array.isArray(messages), true);
|
||||||
|
t.equal(messages.length, 1);
|
||||||
|
var message = messages[0];
|
||||||
|
|
||||||
|
t.notEqual(message, null);
|
||||||
|
t.ok(Buffer.isBuffer(message.value));
|
||||||
|
t.equal(typeof message.offset, 'number');
|
||||||
|
if (numReceived === numMessages) {
|
||||||
|
// give it a second to get an error
|
||||||
|
next();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'stops reading on unsubscribe': function(next) {
|
||||||
|
var numMessages = 10;
|
||||||
|
var numReceived = 0;
|
||||||
|
var numSent = 0;
|
||||||
|
|
||||||
|
fakeClient.consume = function(size, cb) {
|
||||||
|
if (numSent < numMessages) {
|
||||||
|
numSent++;
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, [{
|
||||||
|
value: Buffer.from('test'),
|
||||||
|
offset: 1
|
||||||
|
}]);
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
// Ignore
|
||||||
|
});
|
||||||
|
stream.on('readable', function() {
|
||||||
|
var message = stream.read();
|
||||||
|
numReceived++;
|
||||||
|
if (message) {
|
||||||
|
t.ok(Buffer.isBuffer(message.value));
|
||||||
|
t.equal(typeof message.offset, 'number');
|
||||||
|
if (numReceived === numMessages) {
|
||||||
|
// give it a second to get an error
|
||||||
|
fakeClient.emit('unsubscribed');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.on('end', function() {
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'calls the callback on destroy': function (next) {
|
||||||
|
|
||||||
|
fakeClient.unsubscribe = function () {};
|
||||||
|
var stream = new KafkaConsumerStream(fakeClient, {
|
||||||
|
topics: 'topic'
|
||||||
|
});
|
||||||
|
stream.once('readable', function () {
|
||||||
|
stream.destroy();
|
||||||
|
stream.once('close', next);
|
||||||
|
});
|
||||||
|
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
48
test/kafka-consumer.spec.js
Normal file
48
test/kafka-consumer.spec.js
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var KafkaConsumer = require('../lib/kafka-consumer');
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
var client;
|
||||||
|
var defaultConfig = {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'group.id': 'kafka-mocha-grp',
|
||||||
|
'metadata.broker.list': 'localhost:9092'
|
||||||
|
};
|
||||||
|
var topicConfig = {};
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'KafkaConsumer client': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
client = new KafkaConsumer(defaultConfig, topicConfig);
|
||||||
|
},
|
||||||
|
'afterEach': function() {
|
||||||
|
client = null;
|
||||||
|
},
|
||||||
|
'does not modify config and clones it': function () {
|
||||||
|
t.deepStrictEqual(defaultConfig, {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'group.id': 'kafka-mocha-grp',
|
||||||
|
'metadata.broker.list': 'localhost:9092'
|
||||||
|
});
|
||||||
|
t.deepStrictEqual(client.globalConfig, {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'group.id': 'kafka-mocha-grp',
|
||||||
|
'metadata.broker.list': 'localhost:9092'
|
||||||
|
});
|
||||||
|
t.notEqual(defaultConfig, client.globalConfig);
|
||||||
|
},
|
||||||
|
'does not modify topic config and clones it': function () {
|
||||||
|
t.deepStrictEqual(topicConfig, {});
|
||||||
|
t.deepStrictEqual(client.topicConfig, {});
|
||||||
|
t.notEqual(topicConfig, client.topicConfig);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
1
test/mocha.opts
Normal file
1
test/mocha.opts
Normal file
@ -0,0 +1 @@
|
|||||||
|
--ui exports
|
55
test/mock.js
Normal file
55
test/mock.js
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
'use strict';
|
||||||
|
|
||||||
|
var net = require('net');
|
||||||
|
var util = require('util');
|
||||||
|
var Emitter = require('events');
|
||||||
|
|
||||||
|
function KafkaServer(config) {
|
||||||
|
if (!(this instanceof KafkaServer)) {
|
||||||
|
return new KafkaServer(config);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (config === undefined) {
|
||||||
|
config = {};
|
||||||
|
} else if (typeof config !== 'object') {
|
||||||
|
throw new TypeError('"config" must be an object');
|
||||||
|
}
|
||||||
|
|
||||||
|
Emitter.call(this);
|
||||||
|
|
||||||
|
var self = this;
|
||||||
|
|
||||||
|
this.socket = net.createServer(function(socket) {
|
||||||
|
socket.end();
|
||||||
|
}); //.unref();
|
||||||
|
|
||||||
|
this.socket.on('error', function(err) {
|
||||||
|
console.error(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
this.socket.listen({
|
||||||
|
port: 9092,
|
||||||
|
host: 'localhost'
|
||||||
|
}, function() {
|
||||||
|
self.address = self.socket.address();
|
||||||
|
self.emit('ready');
|
||||||
|
});
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
util.inherits(KafkaServer, Emitter);
|
||||||
|
|
||||||
|
KafkaServer.prototype.close = function(cb) {
|
||||||
|
this.socket.close(cb);
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = KafkaServer;
|
723
test/producer-stream.spec.js
Normal file
723
test/producer-stream.spec.js
Normal file
@ -0,0 +1,723 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var ProducerStream = require('../lib/producer-stream');
|
||||||
|
var t = require('assert');
|
||||||
|
var Readable = require('stream').Readable;
|
||||||
|
var Emitter = require('events');
|
||||||
|
|
||||||
|
var fakeClient;
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'ProducerStream stream': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
fakeClient = new Emitter();
|
||||||
|
|
||||||
|
fakeClient._isConnected = true;
|
||||||
|
fakeClient._isConnecting = false;
|
||||||
|
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.connect = function(opts, cb) {
|
||||||
|
setImmediate(function() {
|
||||||
|
this.emit('ready');
|
||||||
|
}.bind(this));
|
||||||
|
return this;
|
||||||
|
};
|
||||||
|
fakeClient.disconnect = function(cb) {
|
||||||
|
setImmediate(function() {
|
||||||
|
this.emit('disconnected');
|
||||||
|
}.bind(this));
|
||||||
|
return this;
|
||||||
|
};
|
||||||
|
fakeClient.poll = function() {
|
||||||
|
return this;
|
||||||
|
};
|
||||||
|
fakeClient.setPollInterval = function() {
|
||||||
|
return this;
|
||||||
|
};
|
||||||
|
},
|
||||||
|
|
||||||
|
'exports a stream class': function() {
|
||||||
|
t.equal(typeof(ProducerStream), 'function');
|
||||||
|
},
|
||||||
|
|
||||||
|
'in buffer mode': {
|
||||||
|
'requires a topic be provided when running in buffer mode': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
var x = new ProducerStream(fakeClient, {});
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can be instantiated': function() {
|
||||||
|
t.equal(typeof new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
}), 'object');
|
||||||
|
},
|
||||||
|
|
||||||
|
'does not run connect if the client is already connected': function(cb) {
|
||||||
|
fakeClient.connect = function() {
|
||||||
|
t.fail('Should not run connect if the client is already connected');
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
|
||||||
|
setTimeout(cb, 10);
|
||||||
|
},
|
||||||
|
|
||||||
|
'does run connect if the client is not already connected': function(cb) {
|
||||||
|
fakeClient._isConnected = false;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.once('ready', cb);
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'forwards connectOptions to client options when provided': function(cb) {
|
||||||
|
var testClientOptions = { timeout: 3000 };
|
||||||
|
|
||||||
|
fakeClient._isConnected = false;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
var fakeConnect = fakeClient.connect;
|
||||||
|
fakeClient.connect = function(opts, callback) {
|
||||||
|
t.deepEqual(opts, testClientOptions);
|
||||||
|
cb();
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic',
|
||||||
|
connectOptions: testClientOptions
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'automatically disconnects when autoclose is not provided': function(cb) {
|
||||||
|
fakeClient.once('disconnected', cb);
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.end();
|
||||||
|
},
|
||||||
|
|
||||||
|
'does not automatically disconnect when autoclose is set to false': function(done) {
|
||||||
|
fakeClient.once('disconnected', function() {
|
||||||
|
t.fail('Should not run disconnect');
|
||||||
|
});
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic',
|
||||||
|
autoClose: false
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.end();
|
||||||
|
|
||||||
|
setTimeout(done, 10);
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly reads off the fake client': function(done) {
|
||||||
|
var message;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome');
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
done();
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome'));
|
||||||
|
},
|
||||||
|
|
||||||
|
'passes a topic string if options are not provided': function(done) {
|
||||||
|
var message;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome');
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
done();
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome'));
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly handles queue errors': function(done) {
|
||||||
|
var message;
|
||||||
|
|
||||||
|
var first = true;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome');
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (first) {
|
||||||
|
first = false;
|
||||||
|
var err = new Error('Queue full');
|
||||||
|
err.code = -184;
|
||||||
|
throw err;
|
||||||
|
} else {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome'));
|
||||||
|
},
|
||||||
|
|
||||||
|
'errors out when a non-queue related error occurs': function(done) {
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
var err = new Error('ERR_MSG_SIZE_TOO_LARGE ');
|
||||||
|
err.code = 10;
|
||||||
|
throw err;
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.on('disconnected', function() {
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.equal(err.code, 10, 'Error was unexpected');
|
||||||
|
// This is good
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome'));
|
||||||
|
},
|
||||||
|
|
||||||
|
'errors out when a non-queue related error occurs but does not disconnect if autoclose is false': function(done) {
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
var err = new Error('ERR_MSG_SIZE_TOO_LARGE ');
|
||||||
|
err.code = 10;
|
||||||
|
throw err;
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.on('disconnected', function() {
|
||||||
|
t.fail('Should not try to disconnect');
|
||||||
|
});
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic',
|
||||||
|
autoClose: false
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.equal(err.code, 10, 'Error was unexpected');
|
||||||
|
// This is good
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome'));
|
||||||
|
|
||||||
|
setTimeout(done, 10);
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly reads more than one message in order': function(done) {
|
||||||
|
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome' + currentMessage);
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (currentMessage === 2) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome1'));
|
||||||
|
stream.write(Buffer.from('Awesome2'));
|
||||||
|
},
|
||||||
|
|
||||||
|
'can be piped into a readable': function(done) {
|
||||||
|
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
var iteration = 0;
|
||||||
|
|
||||||
|
var readable = new Readable({
|
||||||
|
read: function(size) {
|
||||||
|
iteration++;
|
||||||
|
if (iteration > 1) {
|
||||||
|
|
||||||
|
} else {
|
||||||
|
this.push('Awesome1');
|
||||||
|
this.push('Awesome2');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome' + currentMessage);
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (currentMessage === 2) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
readable.pipe(stream);
|
||||||
|
},
|
||||||
|
'can drain buffered chunks': function(done) {
|
||||||
|
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome' + currentMessage);
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (currentMessage === 3) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = false;
|
||||||
|
fakeClient._isConnecting = true;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome1'));
|
||||||
|
stream.write(Buffer.from('Awesome2'));
|
||||||
|
stream.write(Buffer.from('Awesome3'));
|
||||||
|
|
||||||
|
fakeClient._isConnected = true;
|
||||||
|
fakeClient._isConnecting = false;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.connect();
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
'in objectMode': {
|
||||||
|
'can be instantiated': function() {
|
||||||
|
t.equal(typeof new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
}), 'object');
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly produces message objects': function(done) {
|
||||||
|
var _timestamp = Date.now();
|
||||||
|
var _opaque = {
|
||||||
|
foo: 'bar'
|
||||||
|
};
|
||||||
|
var _headers = {
|
||||||
|
header: 'header value'
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key, timestamp, opaque, headers) {
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome');
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
t.equal(partition, 10);
|
||||||
|
t.equal(key, 'key');
|
||||||
|
t.deepEqual(_opaque, opaque);
|
||||||
|
t.deepEqual(_timestamp, timestamp);
|
||||||
|
t.deepEqual(_headers, headers);
|
||||||
|
done();
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
topic: 'topic',
|
||||||
|
value: Buffer.from('Awesome'),
|
||||||
|
partition: 10,
|
||||||
|
key: 'key',
|
||||||
|
timestamp: _timestamp,
|
||||||
|
opaque: _opaque,
|
||||||
|
headers: _headers
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly handles queue errors': function(done) {
|
||||||
|
var message;
|
||||||
|
|
||||||
|
var first = true;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome');
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
t.equal(partition, 10);
|
||||||
|
t.equal(key, 'key');
|
||||||
|
if (first) {
|
||||||
|
first = false;
|
||||||
|
var err = new Error('Queue full');
|
||||||
|
err.code = -184;
|
||||||
|
throw err;
|
||||||
|
} else {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
topic: 'topic',
|
||||||
|
value: Buffer.from('Awesome'),
|
||||||
|
partition: 10,
|
||||||
|
key: 'key'
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'errors out when a non-queue related error occurs': function(done) {
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
var err = new Error('ERR_MSG_SIZE_TOO_LARGE ');
|
||||||
|
err.code = 10;
|
||||||
|
throw err;
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.on('disconnected', function() {
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.equal(err.code, 10, 'Error was unexpected');
|
||||||
|
// This is good
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write(Buffer.from('Awesome'));
|
||||||
|
},
|
||||||
|
|
||||||
|
'errors out when a non-queue related error occurs but does not disconnect if autoclose is false': function(done) {
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
var err = new Error('ERR_MSG_SIZE_TOO_LARGE ');
|
||||||
|
err.code = 10;
|
||||||
|
throw err;
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.on('disconnected', function() {
|
||||||
|
t.fail('Should not try to disconnect');
|
||||||
|
});
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true,
|
||||||
|
autoClose: false
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.equal(err.code, 10, 'Error was unexpected');
|
||||||
|
// This is good
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
|
||||||
|
setTimeout(done, 10);
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly reads more than one message in order': function(done) {
|
||||||
|
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome' + currentMessage);
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (currentMessage === 2) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome1'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome2'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can be piped into a readable': function(done) {
|
||||||
|
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
var iteration = 0;
|
||||||
|
|
||||||
|
var readable = new Readable({
|
||||||
|
objectMode: true,
|
||||||
|
read: function(size) {
|
||||||
|
iteration++;
|
||||||
|
if (iteration > 1) {
|
||||||
|
|
||||||
|
} else {
|
||||||
|
this.push({
|
||||||
|
topic: 'topic',
|
||||||
|
value: Buffer.from('Awesome1')
|
||||||
|
});
|
||||||
|
this.push({
|
||||||
|
topic: 'topic',
|
||||||
|
value: Buffer.from('Awesome2')
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome' + currentMessage);
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (currentMessage === 2) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
readable.pipe(stream);
|
||||||
|
},
|
||||||
|
|
||||||
|
'can drain buffered messages': function(done) {
|
||||||
|
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
t.equal('topic', topic);
|
||||||
|
t.equal(message.toString(), 'Awesome' + currentMessage);
|
||||||
|
t.equal(Buffer.isBuffer(message), true);
|
||||||
|
if (currentMessage === 3) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = false;
|
||||||
|
fakeClient._isConnecting = true;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome1'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome2'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome3'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = true;
|
||||||
|
fakeClient._isConnecting = false;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.connect();
|
||||||
|
},
|
||||||
|
|
||||||
|
'properly handles queue errors while draining': function(done) {
|
||||||
|
var message;
|
||||||
|
var currentMessage = 0;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
if (currentMessage === 3) {
|
||||||
|
var err = new Error('Queue full');
|
||||||
|
err.code = -184;
|
||||||
|
throw err;
|
||||||
|
} else if (currentMessage === 4) {
|
||||||
|
done();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.fail(err);
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = false;
|
||||||
|
fakeClient._isConnecting = true;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome1'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome2'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome3'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome4'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = true;
|
||||||
|
fakeClient._isConnecting = false;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.connect();
|
||||||
|
},
|
||||||
|
|
||||||
|
'errors out for non-queue related errors while draining': function (done) {
|
||||||
|
var currentMessage = 0;
|
||||||
|
|
||||||
|
fakeClient.produce = function(topic, partition, message, key) {
|
||||||
|
currentMessage++;
|
||||||
|
if (currentMessage === 3) {
|
||||||
|
var err = new Error('ERR_MSG_SIZE_TOO_LARGE ');
|
||||||
|
err.code = 10;
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
fakeClient.on('disconnected', function() {
|
||||||
|
done();
|
||||||
|
});
|
||||||
|
|
||||||
|
var stream = new ProducerStream(fakeClient, {
|
||||||
|
objectMode: true
|
||||||
|
});
|
||||||
|
stream.on('error', function(err) {
|
||||||
|
t.equal(err.code, 10, 'Error was unexpected');
|
||||||
|
// This is good
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = false;
|
||||||
|
fakeClient._isConnecting = true;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome1'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome2'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome3'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
stream.write({
|
||||||
|
value: Buffer.from('Awesome4'),
|
||||||
|
topic: 'topic'
|
||||||
|
});
|
||||||
|
|
||||||
|
fakeClient._isConnected = true;
|
||||||
|
fakeClient._isConnecting = false;
|
||||||
|
fakeClient.isConnected = function() {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
fakeClient.connect();
|
||||||
|
},
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
};
|
100
test/producer.spec.js
Normal file
100
test/producer.spec.js
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Producer = require('../lib/producer');
|
||||||
|
var t = require('assert');
|
||||||
|
// var Mock = require('./mock');
|
||||||
|
|
||||||
|
var client;
|
||||||
|
var defaultConfig = {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
};
|
||||||
|
var topicConfig = {};
|
||||||
|
|
||||||
|
var server;
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'Producer client': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
client = new Producer(defaultConfig, topicConfig);
|
||||||
|
},
|
||||||
|
'afterEach': function() {
|
||||||
|
client = null;
|
||||||
|
},
|
||||||
|
'is an object': function() {
|
||||||
|
t.equal(typeof(client), 'object');
|
||||||
|
},
|
||||||
|
'requires configuration': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
return new Producer();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'has necessary methods from superclass': function() {
|
||||||
|
var methods = ['connect', 'disconnect', 'getMetadata'];
|
||||||
|
methods.forEach(function(m) {
|
||||||
|
t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'has "_disconnect" override': function() {
|
||||||
|
t.equal(typeof(client._disconnect), 'function', 'Producer is missing base _disconnect method');
|
||||||
|
},
|
||||||
|
'does not modify config and clones it': function () {
|
||||||
|
t.deepStrictEqual(defaultConfig, {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
});
|
||||||
|
t.deepStrictEqual(client.globalConfig, {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
});
|
||||||
|
t.notEqual(defaultConfig, client.globalConfig);
|
||||||
|
},
|
||||||
|
'does not modify topic config and clones it': function () {
|
||||||
|
t.deepStrictEqual(topicConfig, {});
|
||||||
|
t.deepStrictEqual(client.topicConfig, {});
|
||||||
|
t.notEqual(topicConfig, client.topicConfig);
|
||||||
|
},
|
||||||
|
'disconnect method': {
|
||||||
|
'calls flush before it runs': function(next) {
|
||||||
|
var providedTimeout = 1;
|
||||||
|
|
||||||
|
client.flush = function(timeout, cb) {
|
||||||
|
t.equal(providedTimeout, timeout, 'Timeouts do not match');
|
||||||
|
t.equal(typeof(cb), 'function');
|
||||||
|
setImmediate(cb);
|
||||||
|
};
|
||||||
|
|
||||||
|
client._disconnect = function(cb) {
|
||||||
|
setImmediate(cb);
|
||||||
|
};
|
||||||
|
|
||||||
|
client.disconnect(providedTimeout, next);
|
||||||
|
},
|
||||||
|
'provides a default timeout when none is provided': function(next) {
|
||||||
|
client.flush = function(timeout, cb) {
|
||||||
|
t.notEqual(timeout, undefined);
|
||||||
|
t.notEqual(timeout, null);
|
||||||
|
t.notEqual(timeout, 0);
|
||||||
|
t.equal(typeof(cb), 'function');
|
||||||
|
setImmediate(cb);
|
||||||
|
};
|
||||||
|
|
||||||
|
client._disconnect = function(cb) {
|
||||||
|
setImmediate(cb);
|
||||||
|
};
|
||||||
|
|
||||||
|
client.disconnect(next);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
496
test/producer/high-level-producer.spec.js
Normal file
496
test/producer/high-level-producer.spec.js
Normal file
@ -0,0 +1,496 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var HighLevelProducer = require('../../lib/producer/high-level-producer');
|
||||||
|
var t = require('assert');
|
||||||
|
var Promise = require('bluebird');
|
||||||
|
// var Mock = require('./mock');
|
||||||
|
|
||||||
|
var client;
|
||||||
|
var defaultConfig = {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
};
|
||||||
|
var topicConfig = {};
|
||||||
|
|
||||||
|
var server;
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'High Level Producer client': {
|
||||||
|
'beforeEach': function() {
|
||||||
|
client = new HighLevelProducer(defaultConfig, topicConfig);
|
||||||
|
},
|
||||||
|
'afterEach': function() {
|
||||||
|
client = null;
|
||||||
|
},
|
||||||
|
'is an object': function() {
|
||||||
|
t.equal(typeof(client), 'object');
|
||||||
|
},
|
||||||
|
'requires configuration': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
return new HighLevelProducer();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'has necessary methods from superclass': function() {
|
||||||
|
var methods = ['_oldProduce'];
|
||||||
|
methods.forEach(function(m) {
|
||||||
|
t.equal(typeof(client[m]), 'function', 'Client is missing ' + m + ' method');
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'does not modify config and clones it': function () {
|
||||||
|
t.deepStrictEqual(defaultConfig, {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
});
|
||||||
|
t.deepStrictEqual(client.globalConfig, {
|
||||||
|
'client.id': 'kafka-mocha',
|
||||||
|
'metadata.broker.list': 'localhost:9092',
|
||||||
|
'socket.timeout.ms': 250
|
||||||
|
});
|
||||||
|
t.notEqual(defaultConfig, client.globalConfig);
|
||||||
|
},
|
||||||
|
'does not modify topic config and clones it': function () {
|
||||||
|
t.deepStrictEqual(topicConfig, {});
|
||||||
|
t.deepStrictEqual(client.topicConfig, {});
|
||||||
|
t.notEqual(topicConfig, client.topicConfig);
|
||||||
|
},
|
||||||
|
'produce method': {
|
||||||
|
'headers support': function(next) {
|
||||||
|
var v = 'foo';
|
||||||
|
var k = 'key';
|
||||||
|
var h = [
|
||||||
|
{ key1: "value1A" },
|
||||||
|
{ key1: "value1B" },
|
||||||
|
{ key2: "value2" },
|
||||||
|
{ key1: "value1C" },
|
||||||
|
];
|
||||||
|
var jsonH = JSON.stringify(h);
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, value, key, timestamp, opaque, headers) {
|
||||||
|
t.equal(value, 'foo');
|
||||||
|
t.equal(key, 'key');
|
||||||
|
t.equal(JSON.stringify(headers), jsonH);
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, h, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can use a custom serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
t.deepEqual(v, Buffer.from('foo'));
|
||||||
|
t.equal(k, 'key');
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
return Buffer.from('foo');
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
return 'key';
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can use a value asynchronous custom serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
t.deepEqual(v, Buffer.from('foo'));
|
||||||
|
t.equal(k, 'key');
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_, cb) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, Buffer.from('foo'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
return 'key';
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can use a key asynchronous custom serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
t.deepEqual(v, Buffer.from('foo'));
|
||||||
|
t.equal(k, 'key');
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
return Buffer.from('foo');
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_, cb) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, 'key');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can use two asynchronous custom serializers': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
t.deepEqual(v, Buffer.from('foo'));
|
||||||
|
t.equal(k, 'key');
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_, cb) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, Buffer.from('foo'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_, cb) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(null, 'key');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
// Promise API
|
||||||
|
'can use a value promise-based custom serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
return new Promise(function(resolve) {
|
||||||
|
resolve(Buffer.from(''));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
return null;
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can use a key promise-based custom serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
t.deepEqual(v, Buffer.from('foo'));
|
||||||
|
t.equal(k, 'key');
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
return Buffer.from('foo');
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
return new Promise(function(resolve) {
|
||||||
|
resolve('key');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'can use two promise-based custom serializers': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
var valueSerializerCalled = false;
|
||||||
|
var keySerializerCalled = false;
|
||||||
|
|
||||||
|
client._oldProduce = function(topic, partition, v, k, timestamp, opaque) {
|
||||||
|
t.equal(valueSerializerCalled, true);
|
||||||
|
t.equal(keySerializerCalled, true);
|
||||||
|
t.deepEqual(v, Buffer.from('foo'));
|
||||||
|
t.equal(k, 'key');
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_) {
|
||||||
|
valueSerializerCalled = true;
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
return new Promise(function(resolve) {
|
||||||
|
resolve(Buffer.from('foo'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_) {
|
||||||
|
keySerializerCalled = true;
|
||||||
|
t.deepEqual(_, k);
|
||||||
|
return new Promise(function(resolve) {
|
||||||
|
resolve('key');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function() {
|
||||||
|
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'bubbles up serializer errors in an async value serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_, cb) {
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(new Error('even together we failed'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function(err) {
|
||||||
|
t.equal(typeof err, 'object', 'an error should be returned');
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'bubbles up serializer errors in an async key serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_, cb) {
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
setImmediate(function() {
|
||||||
|
cb(new Error('even together we failed'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function(err) {
|
||||||
|
t.equal(typeof err, 'object', 'an error should be returned');
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'bubbles up serializer errors in a sync value serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_, cb) {
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
throw new Error('even together we failed');
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function(err) {
|
||||||
|
t.equal(typeof err, 'object', 'an error should be returned');
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'bubbles up serializer errors in a sync key serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_, cb) {
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
throw new Error('even together we failed');
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function(err) {
|
||||||
|
t.equal(typeof err, 'object', 'an error should be returned');
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'bubbles up serializer errors in a promise-based value serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setValueSerializer(function(_) {
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
|
||||||
|
return new Promise(function (resolve, reject) {
|
||||||
|
reject(new Error('even together we failed'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function(err) {
|
||||||
|
t.equal(typeof err, 'object', 'an error should be returned');
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
'bubbles up serializer errors in a promise-based key serializer': function(next) {
|
||||||
|
var v = {
|
||||||
|
disparaging: 'hyena',
|
||||||
|
};
|
||||||
|
|
||||||
|
var k = {
|
||||||
|
delicious: 'cookie',
|
||||||
|
};
|
||||||
|
|
||||||
|
client.setKeySerializer(function(_) {
|
||||||
|
t.deepEqual(_, v);
|
||||||
|
|
||||||
|
return new Promise(function(resolve, reject) {
|
||||||
|
return new Promise(function (resolve, reject) {
|
||||||
|
reject(new Error('even together we failed'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
client.produce('tawpic', 0, v, k, null, function(err) {
|
||||||
|
t.equal(typeof err, 'object', 'an error should be returned');
|
||||||
|
next();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
53
test/tools/ref-counter.spec.js
Normal file
53
test/tools/ref-counter.spec.js
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
var t = require('assert');
|
||||||
|
var RefCounter = require('../../lib/tools/ref-counter');
|
||||||
|
|
||||||
|
// No-op used where a RefCounter callback is required but the test ignores it.
function noop() {}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'RefCounter': {
|
||||||
|
'is an object': function() {
|
||||||
|
t.equal(typeof(RefCounter), 'function');
|
||||||
|
},
|
||||||
|
'should become active when incremented': function(next) {
|
||||||
|
var refCounter = new RefCounter(function() { next(); }, noop);
|
||||||
|
|
||||||
|
refCounter.increment();
|
||||||
|
},
|
||||||
|
'should become inactive when incremented and decremented': function(next) {
|
||||||
|
var refCounter = new RefCounter(noop, function() { next(); });
|
||||||
|
|
||||||
|
refCounter.increment();
|
||||||
|
setImmediate(function() {
|
||||||
|
refCounter.decrement();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'should support multiple accesses': function(next) {
|
||||||
|
var refCounter = new RefCounter(noop, function() { next(); });
|
||||||
|
|
||||||
|
refCounter.increment();
|
||||||
|
refCounter.increment();
|
||||||
|
refCounter.decrement();
|
||||||
|
setImmediate(function() {
|
||||||
|
refCounter.decrement();
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'should be reusable': function(next) {
|
||||||
|
var numActives = 0;
|
||||||
|
var numPassives = 0;
|
||||||
|
var refCounter = new RefCounter(function() {
|
||||||
|
numActives += 1;
|
||||||
|
}, function() {
|
||||||
|
numPassives += 1;
|
||||||
|
|
||||||
|
if (numActives === 2 && numPassives === 2) {
|
||||||
|
next();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
refCounter.increment();
|
||||||
|
refCounter.decrement();
|
||||||
|
refCounter.increment();
|
||||||
|
refCounter.decrement();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
104
test/topic-partition.spec.js
Normal file
104
test/topic-partition.spec.js
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var TopicPartition = require('../lib/topic-partition');
|
||||||
|
var Topic = require('../lib/topic');
|
||||||
|
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'TopicPartition': {
|
||||||
|
'is a function': function() {
|
||||||
|
t.equal(typeof(TopicPartition), 'function');
|
||||||
|
},
|
||||||
|
'be constructable': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 0);
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.offset, 0);
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
},
|
||||||
|
'be creatable using 0 as the partition': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 0, 0);
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.offset, 0);
|
||||||
|
t.equal(toppar.partition, 0);
|
||||||
|
},
|
||||||
|
'throw if partition is null or undefined': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
var tp = new TopicPartition('topic', undefined, 0);
|
||||||
|
});
|
||||||
|
|
||||||
|
t.throws(function() {
|
||||||
|
var tp = new TopicPartition('topic', null, 0);
|
||||||
|
});
|
||||||
|
},
|
||||||
|
'sets offset to stored by default': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1);
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_STORED);
|
||||||
|
},
|
||||||
|
'sets offset to end if "end" is provided"': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 'end');
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_END);
|
||||||
|
},
|
||||||
|
'sets offset to end if "latest" is provided"': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 'latest');
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_END);
|
||||||
|
},
|
||||||
|
'sets offset to beginning if "beginning" is provided"': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 'beginning');
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_BEGINNING);
|
||||||
|
},
|
||||||
|
'sets offset to start if "beginning" is provided"': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 'beginning');
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_BEGINNING);
|
||||||
|
},
|
||||||
|
'sets offset to stored if "stored" is provided"': function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 'stored');
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_STORED);
|
||||||
|
},
|
||||||
|
'throws when an invalid special offset is provided"': function() {
|
||||||
|
t.throws(function() {
|
||||||
|
var toppar = new TopicPartition('topic', 1, 'fake');
|
||||||
|
});
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'TopicPartition.map': {
|
||||||
|
'is a function': function() {
|
||||||
|
t.equal(typeof(TopicPartition.map), 'function');
|
||||||
|
},
|
||||||
|
'converts offsets inside the array': function() {
|
||||||
|
var result = TopicPartition.map([{ topic: 'topic', partition: 1, offset: 'stored' }]);
|
||||||
|
var toppar = result[0];
|
||||||
|
|
||||||
|
t.equal(toppar.topic, 'topic');
|
||||||
|
t.equal(toppar.partition, 1);
|
||||||
|
t.equal(toppar.offset, Topic.OFFSET_STORED);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
46
test/util.spec.js
Normal file
46
test/util.spec.js
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
/*
|
||||||
|
* node-rdkafka - Node.js wrapper for RdKafka C/C++ library
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016 Blizzard Entertainment
|
||||||
|
*
|
||||||
|
* This software may be modified and distributed under the terms
|
||||||
|
* of the MIT license. See the LICENSE.txt file for details.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var shallowCopy = require('../lib/util').shallowCopy;
|
||||||
|
var t = require('assert');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
'shallowCopy utility': {
|
||||||
|
'returns value itself when it is not an object': function () {
|
||||||
|
t.strictEqual(10, shallowCopy(10));
|
||||||
|
t.strictEqual('str', shallowCopy('str'));
|
||||||
|
t.strictEqual(null, shallowCopy(null));
|
||||||
|
t.strictEqual(undefined, shallowCopy(undefined));
|
||||||
|
t.strictEqual(false, shallowCopy(false));
|
||||||
|
},
|
||||||
|
'returns shallow copy of the passed object': function () {
|
||||||
|
var obj = {
|
||||||
|
sub: { a: 10 },
|
||||||
|
b: 'str',
|
||||||
|
};
|
||||||
|
var copy = shallowCopy(obj);
|
||||||
|
|
||||||
|
t.notEqual(obj, copy);
|
||||||
|
t.deepStrictEqual(obj, copy);
|
||||||
|
t.equal(obj.sub, copy.sub);
|
||||||
|
},
|
||||||
|
'does not copy non-enumerable and inherited properties': function () {
|
||||||
|
var obj = Object.create({
|
||||||
|
a: 10,
|
||||||
|
}, {
|
||||||
|
b: { value: 'str' },
|
||||||
|
c: { value: true, enumerable: true },
|
||||||
|
});
|
||||||
|
var copy = shallowCopy(obj);
|
||||||
|
|
||||||
|
t.notEqual(obj, copy);
|
||||||
|
t.deepStrictEqual(copy, { c: true });
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
30
util/configure.js
Normal file
30
util/configure.js
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
'use strict';

/*
 * Runs librdkafka's ./configure, pointing both --prefix and --libdir at
 * build/deps so the compiled library lands inside this package's tree.
 * On Windows this step is skipped entirely (handled by windows-install).
 *
 * Fixed: removed the unused `query` (process.argv[2]) and `fs` require,
 * which were dead code.
 */

var path = require('path');

var baseDir = path.resolve(__dirname, '../');
var releaseDir = path.join(baseDir, 'build', 'deps');

var isWin = /^win/.test(process.platform);

// Skip running this if we are running on a windows system
if (isWin) {
  process.stderr.write('Skipping run because we are on windows\n');
  process.exit(0);
}

var childProcess = require('child_process');

try {
  // Inherit stdio so configure's output shows up in the build log.
  childProcess.execSync('./configure --prefix=' + releaseDir + ' --libdir=' + releaseDir, {
    cwd: baseDir,
    stdio: [0,1,2]
  });
  process.exit(0);
} catch (e) {
  process.stderr.write(e.message + '\n');
  process.exit(1);
}
|
6
util/get-env.js
Normal file
6
util/get-env.js
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
'use strict';

// Usage: node get-env.js NAME [fallback]
// Prints the value of environment variable NAME, or the fallback (default '').
var name = process.argv[2];
var fallback = process.argv[3] || '';

process.stdout.write(process.env[name] || fallback);
|
11
util/test-compile.js
Normal file
11
util/test-compile.js
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
// Smoke test: the native addon loads and a Producer can be constructed.
// A failed broker connection is fine (exit 0) — only loading is under test.
var kafka = require('../lib');

var producer = new kafka.Producer({ 'bootstrap.servers': 'localhost:9092' }, {});

producer.connect({ timeout: 1000 }, function(err) {
  if (err) {
    process.exit(0);
  } else {
    producer.disconnect();
  }
});
|
100
util/test-producer-delivery.js
Normal file
100
util/test-producer-delivery.js
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
const Kafka = require("../lib/index.js");
|
||||||
|
|
||||||
|
// Resolve after `ms` milliseconds; used to back off while the queue drains.
const wait = (ms) =>
  new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
|
||||||
|
|
||||||
|
// Produce `totalMessages` identical messages to the "node" topic, polling and
// backing off briefly whenever librdkafka's local queue is full.
const sendData = async (producer, totalMessages) => {
  const topic = "node";
  const msg = "dkfljaskldfajkldsjfklasdjfalk;dsjfkl;asjfdskl;fjda;lkfjsdklfsajlkfjdsklfajsklfjsklanklsalkjkljkasfak";
  const buffer = Buffer.from(msg);
  const key = "test";

  for (let n = 0; n < totalMessages; ++n) {
    let retry = true;
    while (retry) {
      retry = false;
      try {
        // opaque = n so delivery reports can be matched to message order.
        producer.produce(topic, -1, buffer, key, null, n);
      }
      catch (error) {
        if (error.code !== Kafka.CODES.ERRORS.ERR__QUEUE_FULL) {
          throw error;
        }
        // Based on config, and messages, this will execute once
        producer.poll();
        // The wait introduces 11-12 seconds of latency when dr_cb is true
        const start = process.hrtime();
        await wait(50);
        const latency = process.hrtime(start);
        console.info(`Wait took ${latency[0]} seconds`);
        retry = true;
      }
    }
  }
  console.log("Finished producing");
};
|
||||||
|
|
||||||
|
// Wait (max 10s) for all delivery reports, then check count and ordering.
// Fixed: reject with an Error instance instead of a bare string, and clear
// the timeout so the pending timer no longer holds the event loop open.
const verifyReports = async (reports, reportsComplete, totalMessages) => {
  let timer;
  const reportsTimeout = new Promise((resolve, reject) => {
    timer = setTimeout(() => {
      reject(new Error("Delivery report timed out"));
    }, 10000);
  });
  try {
    await Promise.race([reportsComplete, reportsTimeout]);
  } finally {
    clearTimeout(timer);
  }
  await wait(500); // wait for some more delivery reports.
  if (reports.length === totalMessages) {
    console.log("Reports count match");
  } else {
    console.error("Reports count doesn't match");
    return;
  }
  // Reports should arrive in produce order: opaque carries the message index.
  for (let n = 0; n < totalMessages; ++n) {
    if (reports[n].opaque !== n) {
      console.error("Expect message number does not match");
    }
  }
};
|
||||||
|
|
||||||
|
// Wire up a producer tuned for bulk throughput, stream the test messages,
// then verify every delivery report arrived in order.
const run = async () => {
  const reports = [];
  const totalMessages = 1000100;
  const producer = new Kafka.Producer({
    "batch.num.messages": 50000,
    "compression.codec": "lz4",
    "delivery.report.only.error": false,
    "dr_cb": true,
    "metadata.broker.list": "localhost:9092",
    "message.send.max.retries": 10000000,
    "queue.buffering.max.kbytes": 2000000,
    "queue.buffering.max.messages": 1000000,
    "queue.buffering.max.ms": 0,
    "socket.keepalive.enable": true,
  }, {});

  producer.setPollInterval(100);
  producer.on("event.log", (obj) => console.log(obj));

  // Resolves once every delivery report has been collected.
  const reportsComplete = new Promise((resolve) => {
    producer.on("delivery-report", (err, report) => {
      reports.push(report);
      if (reports.length === totalMessages) {
        resolve();
      }
    });
  });

  // Resolves once the broker connection is established.
  await new Promise((resolve) => {
    producer.on("ready", () => {
      console.log("Producer is ready");
      resolve();
    });
    producer.connect();
  });

  await sendData(producer, totalMessages);
  await verifyReports(reports, reportsComplete, totalMessages);
  process.exit(0);
};
|
||||||
|
|
||||||
|
// Entry point. Fixed: mark the process as failed on error so CI does not
// report success when the delivery test throws; previously the error was
// only logged and the exit code stayed 0.
run().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
|
9
win_install.bat
Normal file
9
win_install.bat
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
@echo off
REM Install OpenSSL (light) via Chocolatey; needed to build librdkafka with SSL.
choco install openssl.light
REM Previously installed windows-build-tools depending on the Node version;
REM kept commented out for reference in case the CI image lacks build tooling.
REM if /i %TRAVIS_NODE_VERSION% gtr 6 (
REM npm install --global --production windows-build-tools
REM ) else (
REM npm install --global --production windows-build-tools@3.1.0
REM )

REM GNU make drives the librdkafka build.
choco install make
|
Loading…
x
Reference in New Issue
Block a user